commit:    d5e2d00898bdfed9586472679760fc81a2ca2d02
tree:      55ac781983bf7144230ad8a5a995ce02b6ac39a1 /drivers/misc
parents:   31e182363b39d84031eadf0caf6d99fd9eb056f0 (diff)
           6e669f085d595cb6053920832c89f1a13067db44 (diff)
author:    Linus Torvalds <torvalds@linux-foundation.org>  2016-03-20 01:38:41 +0300
committer: Linus Torvalds <torvalds@linux-foundation.org>  2016-03-20 01:38:41 +0300
Merge tag 'powerpc-4.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
"This was delayed a day or two by some build-breakage on old toolchains
which we've now fixed.
There are two PCI commits, both acked by Bjorn.
There's one commit to mm/hugepage.c which is (co)authored by Kirill.
Highlights:
- Restructure Linux PTE on Book3S/64 to Radix format from Paul
Mackerras
- Book3s 64 MMU cleanup in preparation for Radix MMU from Aneesh
Kumar K.V
- Add POWER9 cputable entry from Michael Neuling
- FPU/Altivec/VSX save/restore optimisations from Cyril Bur
- Add support for new ftrace ABI on ppc64le from Torsten Duwe
Various cleanups & minor fixes from:
- Adam Buchbinder, Andrew Donnellan, Balbir Singh, Christophe Leroy,
Cyril Bur, Luis Henriques, Madhavan Srinivasan, Pan Xinhui, Russell
Currey, Sukadev Bhattiprolu, Suraj Jitindar Singh.
General:
- atomics: Allow architectures to define their own __atomic_op_*
helpers from Boqun Feng
- Implement atomic{,64}_*_return_* variants and acquire/release/
relaxed variants for (cmp)xchg from Boqun Feng (see the standalone
sketch after this list)
- Add powernv_defconfig from Jeremy Kerr
- Fix BUG_ON() reporting in real mode from Balbir Singh
- Add xmon command to dump OPAL msglog from Andrew Donnellan
- Add xmon command to dump process/task similar to ps(1) from Douglas
Miller
- Clean up memory hotplug failure paths from David Gibson
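To make the acquire/release/relaxed distinction above concrete, here is a
minimal standalone sketch in plain C11 (not code from this series; the
kernel equivalents are smp_store_release()/smp_load_acquire() and the new
atomic_*_acquire/_release/_relaxed variants):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ready;        /* flag published with release semantics */
static int payload;             /* plain data guarded by the flag */

/* Writer: store the data, then release-store the flag. The release
 * ordering guarantees the payload write is visible to any reader that
 * acquire-loads the flag and observes it set. */
static void publish(int value)
{
        payload = value;
        atomic_store_explicit(&ready, 1, memory_order_release);
}

/* Reader: the acquire-load pairs with the release-store above. A
 * relaxed load would be cheaper but would not order the payload read. */
static int consume(void)
{
        while (!atomic_load_explicit(&ready, memory_order_acquire))
                ;
        return payload;
}

int main(void)
{
        publish(42);
        printf("%d\n", consume());
        return 0;
}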
pci/eeh:
- Redesign SR-IOV on PowerNV to give absolute isolation between VFs
from Wei Yang.
- EEH Support for SRIOV VFs from Wei Yang and Gavin Shan.
- PCI/IOV: Rename and export virtfn_{add,remove} from Wei Yang
- PCI: Add pcibios_bus_add_device() weak function from Wei Yang (see
the weak-symbol sketch after this list)
- MAINTAINERS: Update EEH details and maintainership from Russell
Currey
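The pcibios_bus_add_device() hook above is a weak default definition that
platform code can override at link time. A minimal standalone sketch of
the weak-symbol mechanism, with bus_add_fixup() as an illustrative name
rather than the kernel symbol:

#include <stdio.h>

/* Default, empty implementation. Because it is declared weak, another
 * translation unit (in the kernel: an architecture or platform file)
 * may provide a strong definition that silently replaces it when the
 * final binary is linked. */
__attribute__((weak)) void bus_add_fixup(void)
{
        /* default: nothing to do */
}

int main(void)
{
        bus_add_fixup();        /* calls the strong override if one is linked in */
        printf("fixup done\n");
        return 0;
}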
cxl:
- Support added to the CXL driver for running on both bare-metal and
hypervisor systems, from Christophe Lombard and Frederic Barrat.
- Ignore probes for virtual afu pci devices from Vaibhav Jain
perf:
- Export Power8 generic and cache events to sysfs from Sukadev
Bhattiprolu
- hv-24x7: Fix usage with chip events, display change in counter
values, display domain indices in sysfs, eliminate domain suffix in
event names, from Sukadev Bhattiprolu
Freescale:
- Updates from Scott: "Highlights include 8xx optimizations, 32-bit
checksum optimizations, 86xx consolidation, e5500/e6500 cpu
hotplug, more fman and other dt bits, and minor fixes/cleanup""
* tag 'powerpc-4.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (179 commits)
powerpc: Fix unrecoverable SLB miss during restore_math()
powerpc/8xx: Fix do_mtspr_cpu6() build on older compilers
powerpc/rcpm: Fix build break when SMP=n
powerpc/book3e-64: Use hardcoded mttmr opcode
powerpc/fsl/dts: Add "jedec,spi-nor" flash compatible
powerpc/T104xRDB: add tdm riser card node to device tree
powerpc32: PAGE_EXEC required for inittext
powerpc/mpc85xx: Add pcsphy nodes to FManV3 device tree
powerpc/mpc85xx: Add MDIO bus muxing support to the board device tree(s)
powerpc/86xx: Introduce and use common dtsi
powerpc/86xx: Update device tree
powerpc/86xx: Move dts files to fsl directory
powerpc/86xx: Switch to kconfig fragments approach
powerpc/86xx: Update defconfigs
powerpc/86xx: Consolidate common platform code
powerpc32: Remove one insn in mulhdu
powerpc32: small optimisation in flush_icache_range()
powerpc: Simplify test in __dma_sync()
powerpc32: move xxxxx_dcache_range() functions inline
powerpc32: Remove clear_pages() and define clear_page() inline
...
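One subtlety worth flagging before the diff: the cxl_irq_info rework below
swaps its 32-bit pid/tid fields on little-endian kernels, because both
values come back packed into a single 64-bit register from the
H_COLLECT_CA_INT_INFO hcall. A standalone sketch of why the declaration
order must flip (split_reg is an illustrative type, not the kernel
struct):

#include <stdint.h>
#include <stdio.h>

struct split_reg {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        uint32_t pid;   /* big endian: the high word is stored first */
        uint32_t tid;
#else
        uint32_t tid;   /* little endian: the low word is stored first */
        uint32_t pid;
#endif
};

int main(void)
{
        union {
                uint64_t reg;
                struct split_reg s;
        } u;

        /* pid in the high 32 bits, tid in the low 32 bits, exactly as
         * a 64-bit hcall return register would hold them */
        u.reg = ((uint64_t)1234 << 32) | 5678;

        /* with the #if above, this prints pid=1234 tid=5678 on either
         * endianness */
        printf("pid=%u tid=%u\n", u.s.pid, u.s.tid);
        return 0;
}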
Diffstat (limited to 'drivers/misc')
 drivers/misc/cxl/Makefile  |    1
 drivers/misc/cxl/api.c     |   83
 drivers/misc/cxl/base.c    |   32
 drivers/misc/cxl/context.c |   11
 drivers/misc/cxl/cxl.h     |  288
 drivers/misc/cxl/debugfs.c |    4
 drivers/misc/cxl/fault.c   |   25
 drivers/misc/cxl/file.c    |   28
 drivers/misc/cxl/flash.c   |  538
 drivers/misc/cxl/guest.c   | 1177
 drivers/misc/cxl/hcalls.c  |  647
 drivers/misc/cxl/hcalls.h  |  204
 drivers/misc/cxl/irq.c     |  309
 drivers/misc/cxl/main.c    |  122
 drivers/misc/cxl/native.c  |  469
 drivers/misc/cxl/of.c      |  513
 drivers/misc/cxl/pci.c     |  267
 drivers/misc/cxl/sysfs.c   |  123
 drivers/misc/cxl/trace.h   |  193
 drivers/misc/cxl/vphb.c    |  167
 20 files changed, 4469 insertions(+), 732 deletions(-)
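The bulk of the cxl changes below introduce a backend-ops table so one
driver core can run either bare-metal (MMIO access, the "native" backend)
or under a hypervisor (hcalls, the "guest" backend), selected once via
cpu_has_feature(CPU_FTR_HVMODE). A minimal sketch of the pattern, with
illustrative names rather than the exact cxl_backend_ops definition:

#include <stdbool.h>
#include <stdio.h>

struct adapter { int num; };

/* One set of entry points, two implementations, chosen once at init. */
struct backend_ops {
        int  (*adapter_reset)(struct adapter *a);
        bool (*link_ok)(struct adapter *a);
};

static int native_reset(struct adapter *a)
{
        printf("native: reset adapter %d via MMIO\n", a->num);
        return 0;
}
static bool native_link_ok(struct adapter *a)
{
        return true;    /* bare metal: would check the PCI channel state */
}

static int guest_reset(struct adapter *a)
{
        printf("guest: reset adapter %d via hcall\n", a->num);
        return 0;
}
static bool guest_link_ok(struct adapter *a)
{
        return true;    /* guest: the hypervisor owns link state */
}

static const struct backend_ops native_ops = { native_reset, native_link_ok };
static const struct backend_ops guest_ops  = { guest_reset,  guest_link_ok };

int main(void)
{
        /* stand-in for cpu_has_feature(CPU_FTR_HVMODE): true means the
         * CPU runs in hypervisor mode, i.e. bare metal */
        bool hv_mode = true;
        const struct backend_ops *ops = hv_mode ? &native_ops : &guest_ops;
        struct adapter a = { .num = 0 };

        if (ops->link_ok(&a))
                ops->adapter_reset(&a);
        return 0;
}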
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile index be2ac5ce349f..8a55c1aa11aa 100644 --- a/drivers/misc/cxl/Makefile +++ b/drivers/misc/cxl/Makefile @@ -4,6 +4,7 @@ ccflags-$(CONFIG_PPC_WERROR) += -Werror cxl-y += main.o file.o irq.o fault.o native.o cxl-y += context.o sysfs.o debugfs.o pci.o trace.o cxl-y += vphb.o api.o +cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o obj-$(CONFIG_CXL) += cxl.o obj-$(CONFIG_CXL_BASE) += base.o diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index ea3eeb7011e1..2107c948406d 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -51,8 +51,6 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev) if (rc) goto err_mapping; - cxl_assign_psn_space(ctx); - return ctx; err_mapping: @@ -78,7 +76,6 @@ struct device *cxl_get_phys_dev(struct pci_dev *dev) return afu->adapter->dev.parent; } -EXPORT_SYMBOL_GPL(cxl_get_phys_dev); int cxl_release_context(struct cxl_context *ctx) { @@ -91,28 +88,11 @@ int cxl_release_context(struct cxl_context *ctx) } EXPORT_SYMBOL_GPL(cxl_release_context); -int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num) -{ - if (num == 0) - num = ctx->afu->pp_irqs; - return afu_allocate_irqs(ctx, num); -} -EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs); - -void cxl_free_afu_irqs(struct cxl_context *ctx) -{ - afu_irq_name_free(ctx); - cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); -} -EXPORT_SYMBOL_GPL(cxl_free_afu_irqs); - static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num) { __u16 range; int r; - WARN_ON(num == 0); - for (r = 0; r < CXL_IRQ_RANGES; r++) { range = ctx->irqs.range[r]; if (num < range) { @@ -123,6 +103,44 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num) return 0; } +int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num) +{ + int res; + irq_hw_number_t hwirq; + + if (num == 0) + num = ctx->afu->pp_irqs; + res = afu_allocate_irqs(ctx, num); + if (!res && !cpu_has_feature(CPU_FTR_HVMODE)) { + /* In a guest, the PSL interrupt is not multiplexed. 
It was + * allocated above, and we need to set its handler + */ + hwirq = cxl_find_afu_irq(ctx, 0); + if (hwirq) + cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl"); + } + return res; +} +EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs); + +void cxl_free_afu_irqs(struct cxl_context *ctx) +{ + irq_hw_number_t hwirq; + unsigned int virq; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) { + hwirq = cxl_find_afu_irq(ctx, 0); + if (hwirq) { + virq = irq_find_mapping(NULL, hwirq); + if (virq) + cxl_unmap_irq(virq, ctx); + } + } + afu_irq_name_free(ctx); + cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter); +} +EXPORT_SYMBOL_GPL(cxl_free_afu_irqs); + int cxl_map_afu_irq(struct cxl_context *ctx, int num, irq_handler_t handler, void *cookie, char *name) { @@ -178,7 +196,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed, cxl_ctx_get(); - if ((rc = cxl_attach_process(ctx, kernel, wed , 0))) { + if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) { put_pid(ctx->pid); cxl_ctx_put(); goto out; @@ -193,7 +211,7 @@ EXPORT_SYMBOL_GPL(cxl_start_context); int cxl_process_element(struct cxl_context *ctx) { - return ctx->pe; + return ctx->external_pe; } EXPORT_SYMBOL_GPL(cxl_process_element); @@ -207,7 +225,6 @@ EXPORT_SYMBOL_GPL(cxl_stop_context); void cxl_set_master(struct cxl_context *ctx) { ctx->master = true; - cxl_assign_psn_space(ctx); } EXPORT_SYMBOL_GPL(cxl_set_master); @@ -325,15 +342,11 @@ EXPORT_SYMBOL_GPL(cxl_start_work); void __iomem *cxl_psa_map(struct cxl_context *ctx) { - struct cxl_afu *afu = ctx->afu; - int rc; - - rc = cxl_afu_check_and_enable(afu); - if (rc) + if (ctx->status != STARTED) return NULL; pr_devel("%s: psn_phys%llx size:%llx\n", - __func__, afu->psn_phys, afu->adapter->ps_size); + __func__, ctx->psn_phys, ctx->psn_size); return ioremap(ctx->psn_phys, ctx->psn_size); } EXPORT_SYMBOL_GPL(cxl_psa_map); @@ -349,11 +362,11 @@ int cxl_afu_reset(struct cxl_context *ctx) struct cxl_afu *afu = ctx->afu; int rc; - rc = __cxl_afu_reset(afu); + rc = cxl_ops->afu_reset(afu); if (rc) return rc; - return cxl_afu_check_and_enable(afu); + return cxl_ops->afu_check_and_enable(afu); } EXPORT_SYMBOL_GPL(cxl_afu_reset); @@ -363,3 +376,11 @@ void cxl_perst_reloads_same_image(struct cxl_afu *afu, afu->adapter->perst_same_image = perst_reloads_same_image; } EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image); + +ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count) +{ + struct cxl_afu *afu = cxl_pci_to_afu(dev); + + return cxl_ops->read_adapter_vpd(afu->adapter, buf, count); +} +EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd); diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c index a9f0dd3255a2..9b90ec6c07cd 100644 --- a/drivers/misc/cxl/base.c +++ b/drivers/misc/cxl/base.c @@ -11,6 +11,7 @@ #include <linux/rcupdate.h> #include <asm/errno.h> #include <misc/cxl-base.h> +#include <linux/of_platform.h> #include "cxl.h" /* protected by rcu */ @@ -84,3 +85,34 @@ void unregister_cxl_calls(struct cxl_calls *calls) synchronize_rcu(); } EXPORT_SYMBOL_GPL(unregister_cxl_calls); + +int cxl_update_properties(struct device_node *dn, + struct property *new_prop) +{ + return of_update_property(dn, new_prop); +} +EXPORT_SYMBOL_GPL(cxl_update_properties); + +static int __init cxl_base_init(void) +{ + struct device_node *np = NULL; + struct platform_device *dev; + int count = 0; + + /* + * Scan for compatible devices in guest only + */ + if (cpu_has_feature(CPU_FTR_HVMODE)) + return 0; + + while ((np = of_find_compatible_node(np, NULL, + 
"ibm,coherent-platform-facility"))) { + dev = of_platform_device_create(np, NULL, NULL); + if (dev) + count++; + } + pr_devel("Found %d cxl device(s)\n", count); + return 0; +} + +module_init(cxl_base_init); diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index 262b88eac414..10370f280500 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -95,7 +95,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, return i; ctx->pe = i; - ctx->elem = &ctx->afu->spa[i]; + if (cpu_has_feature(CPU_FTR_HVMODE)) { + ctx->elem = &ctx->afu->native->spa[i]; + ctx->external_pe = ctx->pe; + } else { + ctx->external_pe = -1; /* assigned when attaching */ + } ctx->pe_inserted = false; /* @@ -214,8 +219,8 @@ int __detach_context(struct cxl_context *ctx) /* Only warn if we detached while the link was OK. * If detach fails when hw is down, we don't care. */ - WARN_ON(cxl_detach_process(ctx) && - cxl_adapter_link_ok(ctx->afu->adapter)); + WARN_ON(cxl_ops->detach_process(ctx) && + cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)); flush_work(&ctx->fault_work); /* Only needed for dedicated process */ /* release the reference to the group leader and mm handling pid */ diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index a521bc72cec2..38e21cf7806e 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -324,6 +324,10 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; #define CXL_MODE_TIME_SLICED 0x4 #define CXL_SUPPORTED_MODES (CXL_MODE_DEDICATED | CXL_MODE_DIRECTED) +#define CXL_DEV_MINORS 13 /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */ +#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS) +#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS) + enum cxl_context_status { CLOSED, OPENED, @@ -336,6 +340,12 @@ enum prefault_modes { CXL_PREFAULT_ALL, }; +enum cxl_attrs { + CXL_ADAPTER_ATTRS, + CXL_AFU_MASTER_ATTRS, + CXL_AFU_ATTRS, +}; + struct cxl_sste { __be64 esid_data; __be64 vsid_data; @@ -344,18 +354,46 @@ struct cxl_sste { #define to_cxl_adapter(d) container_of(d, struct cxl, dev) #define to_cxl_afu(d) container_of(d, struct cxl_afu, dev) -struct cxl_afu { +struct cxl_afu_native { + void __iomem *p1n_mmio; + void __iomem *afu_desc_mmio; irq_hw_number_t psl_hwirq; + unsigned int psl_virq; + struct mutex spa_mutex; + /* + * Only the first part of the SPA is used for the process element + * linked list. The only other part that software needs to worry about + * is sw_command_status, which we store a separate pointer to. 
+ * Everything else in the SPA is only used by hardware + */ + struct cxl_process_element *spa; + __be64 *sw_command_status; + unsigned int spa_size; + int spa_order; + int spa_max_procs; + u64 pp_offset; +}; + +struct cxl_afu_guest { + u64 handle; + phys_addr_t p2n_phys; + u64 p2n_size; + int max_ints; + struct mutex recovery_lock; + int previous_state; +}; + +struct cxl_afu { + struct cxl_afu_native *native; + struct cxl_afu_guest *guest; irq_hw_number_t serr_hwirq; - char *err_irq_name; - char *psl_irq_name; unsigned int serr_virq; - void __iomem *p1n_mmio; + char *psl_irq_name; + char *err_irq_name; void __iomem *p2n_mmio; phys_addr_t psn_phys; - u64 pp_offset; u64 pp_size; - void __iomem *afu_desc_mmio; + struct cxl *adapter; struct device dev; struct cdev afu_cdev_s, afu_cdev_m, afu_cdev_d; @@ -363,26 +401,12 @@ struct cxl_afu { struct idr contexts_idr; struct dentry *debugfs; struct mutex contexts_lock; - struct mutex spa_mutex; spinlock_t afu_cntl_lock; /* AFU error buffer fields and bin attribute for sysfs */ u64 eb_len, eb_offset; struct bin_attribute attr_eb; - /* - * Only the first part of the SPA is used for the process element - * linked list. The only other part that software needs to worry about - * is sw_command_status, which we store a separate pointer to. - * Everything else in the SPA is only used by hardware - */ - struct cxl_process_element *spa; - __be64 *sw_command_status; - unsigned int spa_size; - int spa_order; - int spa_max_procs; - unsigned int psl_virq; - /* pointer to the vphb */ struct pci_controller *phb; @@ -421,6 +445,12 @@ struct cxl_irq_name { char *name; }; +struct irq_avail { + irq_hw_number_t offset; + irq_hw_number_t range; + unsigned long *bitmap; +}; + /* * This is a cxl context. If the PSL is in dedicated mode, there will be one * of these per AFU. If in AFU directed there can be lots of these. @@ -476,7 +506,19 @@ struct cxl_context { struct cxl_process_element *elem; - int pe; /* process element handle */ + /* + * pe is the process element handle, assigned by this driver when the + * context is initialized. + * + * external_pe is the PE shown outside of cxl. + * On bare-metal, pe=external_pe, because we decide what the handle is. + * In a guest, we only find out about the pe used by pHyp when the + * context is attached, and that's the value we want to report outside + * of cxl. 
+ */ + int pe; + int external_pe; + u32 irq_count; bool pe_inserted; bool master; @@ -488,11 +530,34 @@ struct cxl_context { struct rcu_head rcu; }; -struct cxl { +struct cxl_native { + u64 afu_desc_off; + u64 afu_desc_size; void __iomem *p1_mmio; void __iomem *p2_mmio; irq_hw_number_t err_hwirq; unsigned int err_virq; + u64 ps_off; +}; + +struct cxl_guest { + struct platform_device *pdev; + int irq_nranges; + struct cdev cdev; + irq_hw_number_t irq_base_offset; + struct irq_avail *irq_avail; + spinlock_t irq_alloc_lock; + u64 handle; + char *status; + u16 vendor; + u16 device; + u16 subsystem_vendor; + u16 subsystem; +}; + +struct cxl { + struct cxl_native *native; + struct cxl_guest *guest; spinlock_t afu_list_lock; struct cxl_afu *afu[CXL_MAX_SLICES]; struct device dev; @@ -503,9 +568,6 @@ struct cxl { struct bin_attribute cxl_attr; int adapter_num; int user_irqs; - u64 afu_desc_off; - u64 afu_desc_size; - u64 ps_off; u64 ps_size; u16 psl_rev; u16 base_image; @@ -519,13 +581,15 @@ struct cxl { bool perst_same_image; }; -int cxl_alloc_one_irq(struct cxl *adapter); -void cxl_release_one_irq(struct cxl *adapter, int hwirq); -int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num); -void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter); -int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq); +int cxl_pci_alloc_one_irq(struct cxl *adapter); +void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq); +int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num); +void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter); +int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq); int cxl_update_image_control(struct cxl *adapter); -int cxl_reset(struct cxl *adapter); +int cxl_pci_reset(struct cxl *adapter); +void cxl_pci_release_afu(struct device *dev); +ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len); /* common == phyp + powernv */ struct cxl_process_element_common { @@ -555,29 +619,32 @@ struct cxl_process_element { __be32 software_state; } __packed; -static inline bool cxl_adapter_link_ok(struct cxl *cxl) +static inline bool cxl_adapter_link_ok(struct cxl *cxl, struct cxl_afu *afu) { struct pci_dev *pdev; - pdev = to_pci_dev(cxl->dev.parent); - return !pci_channel_offline(pdev); + if (cpu_has_feature(CPU_FTR_HVMODE)) { + pdev = to_pci_dev(cxl->dev.parent); + return !pci_channel_offline(pdev); + } + return true; } static inline void __iomem *_cxl_p1_addr(struct cxl *cxl, cxl_p1_reg_t reg) { WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE)); - return cxl->p1_mmio + cxl_reg_off(reg); + return cxl->native->p1_mmio + cxl_reg_off(reg); } static inline void cxl_p1_write(struct cxl *cxl, cxl_p1_reg_t reg, u64 val) { - if (likely(cxl_adapter_link_ok(cxl))) + if (likely(cxl_adapter_link_ok(cxl, NULL))) out_be64(_cxl_p1_addr(cxl, reg), val); } static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg) { - if (likely(cxl_adapter_link_ok(cxl))) + if (likely(cxl_adapter_link_ok(cxl, NULL))) return in_be64(_cxl_p1_addr(cxl, reg)); else return ~0ULL; @@ -586,18 +653,18 @@ static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg) static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg) { WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE)); - return afu->p1n_mmio + cxl_reg_off(reg); + return afu->native->p1n_mmio + cxl_reg_off(reg); } static inline void cxl_p1n_write(struct cxl_afu *afu, 
cxl_p1n_reg_t reg, u64 val) { - if (likely(cxl_adapter_link_ok(afu->adapter))) + if (likely(cxl_adapter_link_ok(afu->adapter, afu))) out_be64(_cxl_p1n_addr(afu, reg), val); } static inline u64 cxl_p1n_read(struct cxl_afu *afu, cxl_p1n_reg_t reg) { - if (likely(cxl_adapter_link_ok(afu->adapter))) + if (likely(cxl_adapter_link_ok(afu->adapter, afu))) return in_be64(_cxl_p1n_addr(afu, reg)); else return ~0ULL; @@ -610,39 +677,19 @@ static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg static inline void cxl_p2n_write(struct cxl_afu *afu, cxl_p2n_reg_t reg, u64 val) { - if (likely(cxl_adapter_link_ok(afu->adapter))) + if (likely(cxl_adapter_link_ok(afu->adapter, afu))) out_be64(_cxl_p2n_addr(afu, reg), val); } static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg) { - if (likely(cxl_adapter_link_ok(afu->adapter))) + if (likely(cxl_adapter_link_ok(afu->adapter, afu))) return in_be64(_cxl_p2n_addr(afu, reg)); else return ~0ULL; } -static inline u64 cxl_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off) -{ - if (likely(cxl_adapter_link_ok(afu->adapter))) - return in_le64((afu)->afu_desc_mmio + (afu)->crs_offset + - ((cr) * (afu)->crs_len) + (off)); - else - return ~0ULL; -} - -static inline u32 cxl_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off) -{ - if (likely(cxl_adapter_link_ok(afu->adapter))) - return in_le32((afu)->afu_desc_mmio + (afu)->crs_offset + - ((cr) * (afu)->crs_len) + (off)); - else - return 0xffffffff; -} -u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off); -u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off); - -ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf, +ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, loff_t off, size_t count); @@ -652,13 +699,14 @@ struct cxl_calls { }; int register_cxl_calls(struct cxl_calls *calls); void unregister_cxl_calls(struct cxl_calls *calls); +int cxl_update_properties(struct device_node *dn, struct property *new_prop); -int cxl_alloc_adapter_nr(struct cxl *adapter); void cxl_remove_adapter_nr(struct cxl *adapter); int cxl_alloc_spa(struct cxl_afu *afu); void cxl_release_spa(struct cxl_afu *afu); +dev_t cxl_get_dev(void); int cxl_file_init(void); void cxl_file_exit(void); int cxl_register_adapter(struct cxl *adapter); @@ -679,21 +727,19 @@ void cxl_sysfs_afu_remove(struct cxl_afu *afu); int cxl_sysfs_afu_m_add(struct cxl_afu *afu); void cxl_sysfs_afu_m_remove(struct cxl_afu *afu); -int cxl_afu_activate_mode(struct cxl_afu *afu, int mode); -int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode); -int cxl_afu_deactivate_mode(struct cxl_afu *afu); +struct cxl *cxl_alloc_adapter(void); +struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice); int cxl_afu_select_best_mode(struct cxl_afu *afu); -int cxl_register_psl_irq(struct cxl_afu *afu); -void cxl_release_psl_irq(struct cxl_afu *afu); -int cxl_register_psl_err_irq(struct cxl *adapter); -void cxl_release_psl_err_irq(struct cxl *adapter); -int cxl_register_serr_irq(struct cxl_afu *afu); -void cxl_release_serr_irq(struct cxl_afu *afu); +int cxl_native_register_psl_irq(struct cxl_afu *afu); +void cxl_native_release_psl_irq(struct cxl_afu *afu); +int cxl_native_register_psl_err_irq(struct cxl *adapter); +void cxl_native_release_psl_err_irq(struct cxl *adapter); +int cxl_native_register_serr_irq(struct cxl_afu *afu); +void cxl_native_release_serr_irq(struct cxl_afu *afu); int afu_register_irqs(struct cxl_context *ctx, u32 count); void afu_release_irqs(struct cxl_context *ctx, void *cookie); void 
afu_irq_name_free(struct cxl_context *ctx); -irqreturn_t cxl_slice_irq_err(int irq, void *data); int cxl_debugfs_init(void); void cxl_debugfs_exit(void); @@ -707,6 +753,7 @@ void cxl_prefault(struct cxl_context *ctx, u64 wed); struct cxl *get_cxl_adapter(int num); int cxl_alloc_sst(struct cxl_context *ctx); +void cxl_dump_debug_buffer(void *addr, size_t size); void init_cxl_native(void); @@ -720,40 +767,54 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq, void cxl_unmap_irq(unsigned int virq, void *cookie); int __detach_context(struct cxl_context *ctx); -/* This matches the layout of the H_COLLECT_CA_INT_INFO retbuf */ +/* + * This must match the layout of the H_COLLECT_CA_INT_INFO retbuf defined + * in PAPR. + * A word about endianness: a pointer to this structure is passed when + * calling the hcall. However, it is not a block of memory filled up by + * the hypervisor. The return values are found in registers, and copied + * one by one when returning from the hcall. See the end of the call to + * plpar_hcall9() in hvCall.S + * As a consequence: + * - we don't need to do any endianness conversion + * - the pid and tid are an exception. They are 32-bit values returned in + * the same 64-bit register. So we do need to worry about byte ordering. + */ struct cxl_irq_info { u64 dsisr; u64 dar; u64 dsr; +#ifndef CONFIG_CPU_LITTLE_ENDIAN u32 pid; u32 tid; +#else + u32 tid; + u32 pid; +#endif u64 afu_err; u64 errstat; - u64 padding[3]; /* to match the expected retbuf size for plpar_hcall9 */ + u64 proc_handle; + u64 padding[2]; /* to match the expected retbuf size for plpar_hcall9 */ }; void cxl_assign_psn_space(struct cxl_context *ctx); -int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, - u64 amr); -int cxl_detach_process(struct cxl_context *ctx); - -int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info); -int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask); +irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info); +int cxl_register_one_irq(struct cxl *adapter, irq_handler_t handler, + void *cookie, irq_hw_number_t *dest_hwirq, + unsigned int *dest_virq, const char *name); int cxl_check_error(struct cxl_afu *afu); int cxl_afu_slbia(struct cxl_afu *afu); int cxl_tlb_slb_invalidate(struct cxl *adapter); int cxl_afu_disable(struct cxl_afu *afu); -int __cxl_afu_reset(struct cxl_afu *afu); -int cxl_afu_check_and_enable(struct cxl_afu *afu); int cxl_psl_purge(struct cxl_afu *afu); void cxl_stop_trace(struct cxl *cxl); int cxl_pci_vphb_add(struct cxl_afu *afu); -void cxl_pci_vphb_reconfigure(struct cxl_afu *afu); void cxl_pci_vphb_remove(struct cxl_afu *afu); extern struct pci_driver cxl_pci_driver; +extern struct platform_driver cxl_of_driver; int afu_allocate_irqs(struct cxl_context *ctx, u32 count); int afu_open(struct inode *inode, struct file *file); @@ -764,4 +825,61 @@ unsigned int afu_poll(struct file *file, struct poll_table_struct *poll); ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off); extern const struct file_operations afu_fops; +struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *dev); +void cxl_guest_remove_adapter(struct cxl *adapter); +int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np); +int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np); +ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len); +ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, 
void *buf, size_t len); +int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np); +void cxl_guest_remove_afu(struct cxl_afu *afu); +int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np); +int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *afu_np); +int cxl_guest_add_chardev(struct cxl *adapter); +void cxl_guest_remove_chardev(struct cxl *adapter); +void cxl_guest_reload_module(struct cxl *adapter); +int cxl_of_probe(struct platform_device *pdev); + +struct cxl_backend_ops { + struct module *module; + int (*adapter_reset)(struct cxl *adapter); + int (*alloc_one_irq)(struct cxl *adapter); + void (*release_one_irq)(struct cxl *adapter, int hwirq); + int (*alloc_irq_ranges)(struct cxl_irq_ranges *irqs, + struct cxl *adapter, unsigned int num); + void (*release_irq_ranges)(struct cxl_irq_ranges *irqs, + struct cxl *adapter); + int (*setup_irq)(struct cxl *adapter, unsigned int hwirq, + unsigned int virq); + irqreturn_t (*handle_psl_slice_error)(struct cxl_context *ctx, + u64 dsisr, u64 errstat); + irqreturn_t (*psl_interrupt)(int irq, void *data); + int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask); + int (*attach_process)(struct cxl_context *ctx, bool kernel, + u64 wed, u64 amr); + int (*detach_process)(struct cxl_context *ctx); + bool (*support_attributes)(const char *attr_name, enum cxl_attrs type); + bool (*link_ok)(struct cxl *cxl, struct cxl_afu *afu); + void (*release_afu)(struct device *dev); + ssize_t (*afu_read_err_buffer)(struct cxl_afu *afu, char *buf, + loff_t off, size_t count); + int (*afu_check_and_enable)(struct cxl_afu *afu); + int (*afu_activate_mode)(struct cxl_afu *afu, int mode); + int (*afu_deactivate_mode)(struct cxl_afu *afu, int mode); + int (*afu_reset)(struct cxl_afu *afu); + int (*afu_cr_read8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 *val); + int (*afu_cr_read16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 *val); + int (*afu_cr_read32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 *val); + int (*afu_cr_read64)(struct cxl_afu *afu, int cr_idx, u64 offset, u64 *val); + int (*afu_cr_write8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 val); + int (*afu_cr_write16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 val); + int (*afu_cr_write32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 val); + ssize_t (*read_adapter_vpd)(struct cxl *adapter, void *buf, size_t count); +}; +extern const struct cxl_backend_ops cxl_native_ops; +extern const struct cxl_backend_ops cxl_guest_ops; +extern const struct cxl_backend_ops *cxl_ops; + +/* check if the given pci_dev is on the the cxl vphb bus */ +bool cxl_pci_is_vphb_device(struct pci_dev *dev); #endif diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c index 18df6f44af2a..5751899e0c17 100644 --- a/drivers/misc/cxl/debugfs.c +++ b/drivers/misc/cxl/debugfs.c @@ -118,6 +118,10 @@ void cxl_debugfs_afu_remove(struct cxl_afu *afu) int __init cxl_debugfs_init(void) { struct dentry *ent; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) + return 0; + ent = debugfs_create_dir("cxl", NULL); if (IS_ERR(ent)) return PTR_ERR(ent); diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c index 81c3f75b7330..9a8650bcb042 100644 --- a/drivers/misc/cxl/fault.c +++ b/drivers/misc/cxl/fault.c @@ -101,7 +101,7 @@ static void cxl_ack_ae(struct cxl_context *ctx) { unsigned long flags; - cxl_ack_irq(ctx, CXL_PSL_TFC_An_AE, 0); + cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0); spin_lock_irqsave(&ctx->lock, flags); 
ctx->pending_fault = true; @@ -125,7 +125,7 @@ static int cxl_handle_segment_miss(struct cxl_context *ctx, else { mb(); /* Order seg table write to TFC MMIO write */ - cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0); + cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0); } return IRQ_HANDLED; @@ -163,7 +163,7 @@ static void cxl_handle_page_fault(struct cxl_context *ctx, local_irq_restore(flags); pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe); - cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0); + cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0); } /* @@ -254,14 +254,17 @@ void cxl_handle_fault(struct work_struct *fault_work) u64 dar = ctx->dar; struct mm_struct *mm = NULL; - if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr || - cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar || - cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) { - /* Most likely explanation is harmless - a dedicated process - * has detached and these were cleared by the PSL purge, but - * warn about it just in case */ - dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n"); - return; + if (cpu_has_feature(CPU_FTR_HVMODE)) { + if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr || + cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar || + cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) { + /* Most likely explanation is harmless - a dedicated + * process has detached and these were cleared by the + * PSL purge, but warn about it just in case + */ + dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n"); + return; + } } /* Early return if the context is being / has been detached */ diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index 783337d22f36..eec468f1612f 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -26,9 +26,7 @@ #include "trace.h" #define CXL_NUM_MINORS 256 /* Total to reserve */ -#define CXL_DEV_MINORS 13 /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */ -#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS) #define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice)) #define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1) #define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2) @@ -36,7 +34,6 @@ #define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu)) #define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu)) -#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS) #define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3) #define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0) @@ -79,7 +76,7 @@ static int __afu_open(struct inode *inode, struct file *file, bool master) if (!afu->current_mode) goto err_put_afu; - if (!cxl_adapter_link_ok(adapter)) { + if (!cxl_ops->link_ok(adapter, afu)) { rc = -EIO; goto err_put_afu; } @@ -210,8 +207,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr); - if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor, - amr))) { + if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor, + amr))) { afu_release_irqs(ctx, ctx); goto out; } @@ -222,12 +219,13 @@ out: mutex_unlock(&ctx->status_mutex); return rc; } + static long afu_ioctl_process_element(struct cxl_context *ctx, int __user *upe) { pr_devel("%s: pe: %i\n", __func__, ctx->pe); - if (copy_to_user(upe, &ctx->pe, sizeof(__u32))) + if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32))) return -EFAULT; return 0; @@ -259,7 +257,7 @@ 
long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (ctx->status == CLOSED) return -EIO; - if (!cxl_adapter_link_ok(ctx->afu->adapter)) + if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) return -EIO; pr_devel("afu_ioctl\n"); @@ -289,7 +287,7 @@ int afu_mmap(struct file *file, struct vm_area_struct *vm) if (ctx->status != STARTED) return -EIO; - if (!cxl_adapter_link_ok(ctx->afu->adapter)) + if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) return -EIO; return cxl_context_iomap(ctx, vm); @@ -336,7 +334,7 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count, int rc; DEFINE_WAIT(wait); - if (!cxl_adapter_link_ok(ctx->afu->adapter)) + if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) return -EIO; if (count < CXL_READ_MIN_SIZE) @@ -349,7 +347,7 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count, if (ctx_event_pending(ctx)) break; - if (!cxl_adapter_link_ok(ctx->afu->adapter)) { + if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { rc = -EIO; goto out; } @@ -445,7 +443,8 @@ static const struct file_operations afu_master_fops = { static char *cxl_devnode(struct device *dev, umode_t *mode) { - if (CXL_DEVT_IS_CARD(dev->devt)) { + if (cpu_has_feature(CPU_FTR_HVMODE) && + CXL_DEVT_IS_CARD(dev->devt)) { /* * These minor numbers will eventually be used to program the * PSL and AFUs once we have dynamic reprogramming support @@ -546,6 +545,11 @@ int cxl_register_adapter(struct cxl *adapter) return device_register(&adapter->dev); } +dev_t cxl_get_dev(void) +{ + return cxl_dev; +} + int __init cxl_file_init(void) { int rc; diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c new file mode 100644 index 000000000000..68dd0b7da471 --- /dev/null +++ b/drivers/misc/cxl/flash.c @@ -0,0 +1,538 @@ +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/semaphore.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <asm/rtas.h> + +#include "cxl.h" +#include "hcalls.h" + +#define DOWNLOAD_IMAGE 1 +#define VALIDATE_IMAGE 2 + +struct ai_header { + u16 version; + u8 reserved0[6]; + u16 vendor; + u16 device; + u16 subsystem_vendor; + u16 subsystem; + u64 image_offset; + u64 image_length; + u8 reserved1[96]; +}; + +static struct semaphore sem; +unsigned long *buffer[CXL_AI_MAX_ENTRIES]; +struct sg_list *le; +static u64 continue_token; +static unsigned int transfer; + +struct update_props_workarea { + __be32 phandle; + __be32 state; + __be64 reserved; + __be32 nprops; +} __packed; + +struct update_nodes_workarea { + __be32 state; + __be64 unit_address; + __be32 reserved; +} __packed; + +#define DEVICE_SCOPE 3 +#define NODE_ACTION_MASK 0xff000000 +#define NODE_COUNT_MASK 0x00ffffff +#define OPCODE_DELETE 0x01000000 +#define OPCODE_UPDATE 0x02000000 +#define OPCODE_ADD 0x03000000 + +static int rcall(int token, char *buf, s32 scope) +{ + int rc; + + spin_lock(&rtas_data_buf_lock); + + memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE); + rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope); + memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE); + + spin_unlock(&rtas_data_buf_lock); + return rc; +} + +static int update_property(struct device_node *dn, const char *name, + u32 vd, char *value) +{ + struct property *new_prop; + u32 *val; + int rc; + + new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL); + if (!new_prop) + return -ENOMEM; + + new_prop->name = kstrdup(name, GFP_KERNEL); + if (!new_prop->name) { + kfree(new_prop); + return -ENOMEM; + } + + new_prop->length = vd; + new_prop->value = kzalloc(new_prop->length, GFP_KERNEL); 
+ if (!new_prop->value) { + kfree(new_prop->name); + kfree(new_prop); + return -ENOMEM; + } + memcpy(new_prop->value, value, vd); + + val = (u32 *)new_prop->value; + rc = cxl_update_properties(dn, new_prop); + pr_devel("%s: update property (%s, length: %i, value: %#x)\n", + dn->name, name, vd, be32_to_cpu(*val)); + + if (rc) { + kfree(new_prop->name); + kfree(new_prop->value); + kfree(new_prop); + } + return rc; +} + +static int update_node(__be32 phandle, s32 scope) +{ + struct update_props_workarea *upwa; + struct device_node *dn; + int i, rc, ret; + char *prop_data; + char *buf; + int token; + u32 nprops; + u32 vd; + + token = rtas_token("ibm,update-properties"); + if (token == RTAS_UNKNOWN_SERVICE) + return -EINVAL; + + buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + dn = of_find_node_by_phandle(be32_to_cpu(phandle)); + if (!dn) { + kfree(buf); + return -ENOENT; + } + + upwa = (struct update_props_workarea *)&buf[0]; + upwa->phandle = phandle; + do { + rc = rcall(token, buf, scope); + if (rc < 0) + break; + + prop_data = buf + sizeof(*upwa); + nprops = be32_to_cpu(upwa->nprops); + + if (*prop_data == 0) { + prop_data++; + vd = be32_to_cpu(*(__be32 *)prop_data); + prop_data += vd + sizeof(vd); + nprops--; + } + + for (i = 0; i < nprops; i++) { + char *prop_name; + + prop_name = prop_data; + prop_data += strlen(prop_name) + 1; + vd = be32_to_cpu(*(__be32 *)prop_data); + prop_data += sizeof(vd); + + if ((vd != 0x00000000) && (vd != 0x80000000)) { + ret = update_property(dn, prop_name, vd, + prop_data); + if (ret) + pr_err("cxl: Could not update property %s - %i\n", + prop_name, ret); + + prop_data += vd; + } + } + } while (rc == 1); + + of_node_put(dn); + kfree(buf); + return rc; +} + +static int update_devicetree(struct cxl *adapter, s32 scope) +{ + struct update_nodes_workarea *unwa; + u32 action, node_count; + int token, rc, i; + __be32 *data, drc_index, phandle; + char *buf; + + token = rtas_token("ibm,update-nodes"); + if (token == RTAS_UNKNOWN_SERVICE) + return -EINVAL; + + buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + unwa = (struct update_nodes_workarea *)&buf[0]; + unwa->unit_address = cpu_to_be64(adapter->guest->handle); + do { + rc = rcall(token, buf, scope); + if (rc && rc != 1) + break; + + data = (__be32 *)buf + 4; + while (be32_to_cpu(*data) & NODE_ACTION_MASK) { + action = be32_to_cpu(*data) & NODE_ACTION_MASK; + node_count = be32_to_cpu(*data) & NODE_COUNT_MASK; + pr_devel("device reconfiguration - action: %#x, nodes: %#x\n", + action, node_count); + data++; + + for (i = 0; i < node_count; i++) { + phandle = *data++; + + switch (action) { + case OPCODE_DELETE: + /* nothing to do */ + break; + case OPCODE_UPDATE: + update_node(phandle, scope); + break; + case OPCODE_ADD: + /* nothing to do, just move pointer */ + drc_index = *data++; + break; + } + } + } + } while (rc == 1); + + kfree(buf); + return 0; +} + +static int handle_image(struct cxl *adapter, int operation, + long (*fct)(u64, u64, u64, u64 *), + struct cxl_adapter_image *ai) +{ + size_t mod, s_copy, len_chunk = 0; + struct ai_header *header = NULL; + unsigned int entries = 0, i; + void *dest, *from; + int rc = 0, need_header; + + /* base adapter image header */ + need_header = (ai->flags & CXL_AI_NEED_HEADER); + if (need_header) { + header = kzalloc(sizeof(struct ai_header), GFP_KERNEL); + if (!header) + return -ENOMEM; + header->version = cpu_to_be16(1); + header->vendor = cpu_to_be16(adapter->guest->vendor); + header->device = 
cpu_to_be16(adapter->guest->device); + header->subsystem_vendor = cpu_to_be16(adapter->guest->subsystem_vendor); + header->subsystem = cpu_to_be16(adapter->guest->subsystem); + header->image_offset = cpu_to_be64(CXL_AI_HEADER_SIZE); + header->image_length = cpu_to_be64(ai->len_image); + } + + /* number of entries in the list */ + len_chunk = ai->len_data; + if (need_header) + len_chunk += CXL_AI_HEADER_SIZE; + + entries = len_chunk / CXL_AI_BUFFER_SIZE; + mod = len_chunk % CXL_AI_BUFFER_SIZE; + if (mod) + entries++; + + if (entries > CXL_AI_MAX_ENTRIES) { + rc = -EINVAL; + goto err; + } + + /* < -- MAX_CHUNK_SIZE = 4096 * 256 = 1048576 bytes --> + * chunk 0 ---------------------------------------------------- + * | header | data | + * ---------------------------------------------------- + * chunk 1 ---------------------------------------------------- + * | data | + * ---------------------------------------------------- + * .... + * chunk n ---------------------------------------------------- + * | data | + * ---------------------------------------------------- + */ + from = (void *) ai->data; + for (i = 0; i < entries; i++) { + dest = buffer[i]; + s_copy = CXL_AI_BUFFER_SIZE; + + if ((need_header) && (i == 0)) { + /* add adapter image header */ + memcpy(buffer[i], header, sizeof(struct ai_header)); + s_copy = CXL_AI_BUFFER_SIZE - CXL_AI_HEADER_SIZE; + dest += CXL_AI_HEADER_SIZE; /* image offset */ + } + if ((i == (entries - 1)) && mod) + s_copy = mod; + + /* copy data */ + if (copy_from_user(dest, from, s_copy)) + goto err; + + /* fill in the list */ + le[i].phys_addr = cpu_to_be64(virt_to_phys(buffer[i])); + le[i].len = cpu_to_be64(CXL_AI_BUFFER_SIZE); + if ((i == (entries - 1)) && mod) + le[i].len = cpu_to_be64(mod); + from += s_copy; + } + pr_devel("%s (op: %i, need header: %i, entries: %i, token: %#llx)\n", + __func__, operation, need_header, entries, continue_token); + + /* + * download/validate the adapter image to the coherent + * platform facility + */ + rc = fct(adapter->guest->handle, virt_to_phys(le), entries, + &continue_token); + if (rc == 0) /* success of download/validation operation */ + continue_token = 0; + +err: + kfree(header); + + return rc; +} + +static int transfer_image(struct cxl *adapter, int operation, + struct cxl_adapter_image *ai) +{ + int rc = 0; + int afu; + + switch (operation) { + case DOWNLOAD_IMAGE: + rc = handle_image(adapter, operation, + &cxl_h_download_adapter_image, ai); + if (rc < 0) { + pr_devel("resetting adapter\n"); + cxl_h_reset_adapter(adapter->guest->handle); + } + return rc; + + case VALIDATE_IMAGE: + rc = handle_image(adapter, operation, + &cxl_h_validate_adapter_image, ai); + if (rc < 0) { + pr_devel("resetting adapter\n"); + cxl_h_reset_adapter(adapter->guest->handle); + return rc; + } + if (rc == 0) { + pr_devel("remove curent afu\n"); + for (afu = 0; afu < adapter->slices; afu++) + cxl_guest_remove_afu(adapter->afu[afu]); + + pr_devel("resetting adapter\n"); + cxl_h_reset_adapter(adapter->guest->handle); + + /* The entire image has now been + * downloaded and the validation has + * been successfully performed. 
+ * After that, the partition should call + * ibm,update-nodes and + * ibm,update-properties to receive the + * current configuration + */ + rc = update_devicetree(adapter, DEVICE_SCOPE); + transfer = 1; + } + return rc; + } + + return -EINVAL; +} + +static long ioctl_transfer_image(struct cxl *adapter, int operation, + struct cxl_adapter_image __user *uai) +{ + struct cxl_adapter_image ai; + + pr_devel("%s\n", __func__); + + if (copy_from_user(&ai, uai, sizeof(struct cxl_adapter_image))) + return -EFAULT; + + /* + * Make sure reserved fields and bits are set to 0 + */ + if (ai.reserved1 || ai.reserved2 || ai.reserved3 || ai.reserved4 || + (ai.flags & ~CXL_AI_ALL)) + return -EINVAL; + + return transfer_image(adapter, operation, &ai); +} + +static int device_open(struct inode *inode, struct file *file) +{ + int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev); + struct cxl *adapter; + int rc = 0, i; + + pr_devel("in %s\n", __func__); + + BUG_ON(sizeof(struct ai_header) != CXL_AI_HEADER_SIZE); + + /* Allows one process to open the device by using a semaphore */ + if (down_interruptible(&sem) != 0) + return -EPERM; + + if (!(adapter = get_cxl_adapter(adapter_num))) + return -ENODEV; + + file->private_data = adapter; + continue_token = 0; + transfer = 0; + + for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) + buffer[i] = NULL; + + /* aligned buffer containing list entries which describes up to + * 1 megabyte of data (256 entries of 4096 bytes each) + * Logical real address of buffer 0 - Buffer 0 length in bytes + * Logical real address of buffer 1 - Buffer 1 length in bytes + * Logical real address of buffer 2 - Buffer 2 length in bytes + * .... + * .... + * Logical real address of buffer N - Buffer N length in bytes + */ + le = (struct sg_list *)get_zeroed_page(GFP_KERNEL); + if (!le) { + rc = -ENOMEM; + goto err; + } + + for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) { + buffer[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL); + if (!buffer[i]) { + rc = -ENOMEM; + goto err1; + } + } + + return 0; + +err1: + for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) { + if (buffer[i]) + free_page((unsigned long) buffer[i]); + } + + if (le) + free_page((unsigned long) le); +err: + put_device(&adapter->dev); + + return rc; +} + +static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct cxl *adapter = file->private_data; + + pr_devel("in %s\n", __func__); + + if (cmd == CXL_IOCTL_DOWNLOAD_IMAGE) + return ioctl_transfer_image(adapter, + DOWNLOAD_IMAGE, + (struct cxl_adapter_image __user *)arg); + else if (cmd == CXL_IOCTL_VALIDATE_IMAGE) + return ioctl_transfer_image(adapter, + VALIDATE_IMAGE, + (struct cxl_adapter_image __user *)arg); + else + return -EINVAL; +} + +static long device_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return device_ioctl(file, cmd, arg); +} + +static int device_close(struct inode *inode, struct file *file) +{ + struct cxl *adapter = file->private_data; + int i; + + pr_devel("in %s\n", __func__); + + for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) { + if (buffer[i]) + free_page((unsigned long) buffer[i]); + } + + if (le) + free_page((unsigned long) le); + + up(&sem); + put_device(&adapter->dev); + continue_token = 0; + + /* reload the module */ + if (transfer) + cxl_guest_reload_module(adapter); + else { + pr_devel("resetting adapter\n"); + cxl_h_reset_adapter(adapter->guest->handle); + } + + transfer = 0; + return 0; +} + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = device_open, + .unlocked_ioctl = 
device_ioctl, + .compat_ioctl = device_compat_ioctl, + .release = device_close, +}; + +void cxl_guest_remove_chardev(struct cxl *adapter) +{ + cdev_del(&adapter->guest->cdev); +} + +int cxl_guest_add_chardev(struct cxl *adapter) +{ + dev_t devt; + int rc; + + devt = MKDEV(MAJOR(cxl_get_dev()), CXL_CARD_MINOR(adapter)); + cdev_init(&adapter->guest->cdev, &fops); + if ((rc = cdev_add(&adapter->guest->cdev, devt, 1))) { + dev_err(&adapter->dev, + "Unable to add chardev on adapter (card%i): %i\n", + adapter->adapter_num, rc); + goto err; + } + adapter->dev.devt = devt; + sema_init(&sem, 1); +err: + return rc; +} diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c new file mode 100644 index 000000000000..8213372de2b7 --- /dev/null +++ b/drivers/misc/cxl/guest.c @@ -0,0 +1,1177 @@ +/* + * Copyright 2015 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/spinlock.h> +#include <linux/uaccess.h> +#include <linux/delay.h> + +#include "cxl.h" +#include "hcalls.h" +#include "trace.h" + +#define CXL_ERROR_DETECTED_EVENT 1 +#define CXL_SLOT_RESET_EVENT 2 +#define CXL_RESUME_EVENT 3 + +static void pci_error_handlers(struct cxl_afu *afu, + int bus_error_event, + pci_channel_state_t state) +{ + struct pci_dev *afu_dev; + + if (afu->phb == NULL) + return; + + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { + if (!afu_dev->driver) + continue; + + switch (bus_error_event) { + case CXL_ERROR_DETECTED_EVENT: + afu_dev->error_state = state; + + if (afu_dev->driver->err_handler && + afu_dev->driver->err_handler->error_detected) + afu_dev->driver->err_handler->error_detected(afu_dev, state); + break; + case CXL_SLOT_RESET_EVENT: + afu_dev->error_state = state; + + if (afu_dev->driver->err_handler && + afu_dev->driver->err_handler->slot_reset) + afu_dev->driver->err_handler->slot_reset(afu_dev); + break; + case CXL_RESUME_EVENT: + if (afu_dev->driver->err_handler && + afu_dev->driver->err_handler->resume) + afu_dev->driver->err_handler->resume(afu_dev); + break; + } + } +} + +static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, + u64 errstat) +{ + pr_devel("in %s\n", __func__); + dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat); + + return cxl_ops->ack_irq(ctx, 0, errstat); +} + +static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu, + void *buf, size_t len) +{ + unsigned int entries, mod; + unsigned long **vpd_buf = NULL; + struct sg_list *le; + int rc = 0, i, tocopy; + u64 out = 0; + + if (buf == NULL) + return -EINVAL; + + /* number of entries in the list */ + entries = len / SG_BUFFER_SIZE; + mod = len % SG_BUFFER_SIZE; + if (mod) + entries++; + + if (entries > SG_MAX_ENTRIES) { + entries = SG_MAX_ENTRIES; + len = SG_MAX_ENTRIES * SG_BUFFER_SIZE; + mod = 0; + } + + vpd_buf = kzalloc(entries * sizeof(unsigned long *), GFP_KERNEL); + if (!vpd_buf) + return -ENOMEM; + + le = (struct sg_list *)get_zeroed_page(GFP_KERNEL); + if (!le) { + rc = -ENOMEM; + goto err1; + } + + for (i = 0; i < entries; i++) { + vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL); + if (!vpd_buf[i]) { + rc = -ENOMEM; + goto err2; + } + le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i])); + le[i].len = cpu_to_be64(SG_BUFFER_SIZE); + if ((i == (entries - 1)) && mod) + le[i].len = 
cpu_to_be64(mod); + } + + if (adapter) + rc = cxl_h_collect_vpd_adapter(adapter->guest->handle, + virt_to_phys(le), entries, &out); + else + rc = cxl_h_collect_vpd(afu->guest->handle, 0, + virt_to_phys(le), entries, &out); + pr_devel("length of available (entries: %i), vpd: %#llx\n", + entries, out); + + if (!rc) { + /* + * hcall returns in 'out' the size of available VPDs. + * It fills the buffer with as much data as possible. + */ + if (out < len) + len = out; + rc = len; + if (out) { + for (i = 0; i < entries; i++) { + if (len < SG_BUFFER_SIZE) + tocopy = len; + else + tocopy = SG_BUFFER_SIZE; + memcpy(buf, vpd_buf[i], tocopy); + buf += tocopy; + len -= tocopy; + } + } + } +err2: + for (i = 0; i < entries; i++) { + if (vpd_buf[i]) + free_page((unsigned long) vpd_buf[i]); + } + free_page((unsigned long) le); +err1: + kfree(vpd_buf); + return rc; +} + +static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info) +{ + return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info); +} + +static irqreturn_t guest_psl_irq(int irq, void *data) +{ + struct cxl_context *ctx = data; + struct cxl_irq_info irq_info; + int rc; + + pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq); + rc = guest_get_irq_info(ctx, &irq_info); + if (rc) { + WARN(1, "Unable to get IRQ info: %i\n", rc); + return IRQ_HANDLED; + } + + rc = cxl_irq(irq, ctx, &irq_info); + return rc; +} + +static int afu_read_error_state(struct cxl_afu *afu, int *state_out) +{ + u64 state; + int rc = 0; + + rc = cxl_h_read_error_state(afu->guest->handle, &state); + if (!rc) { + WARN_ON(state != H_STATE_NORMAL && + state != H_STATE_DISABLE && + state != H_STATE_TEMP_UNAVAILABLE && + state != H_STATE_PERM_UNAVAILABLE); + *state_out = state & 0xffffffff; + } + return rc; +} + +static irqreturn_t guest_slice_irq_err(int irq, void *data) +{ + struct cxl_afu *afu = data; + int rc; + u64 serr; + + WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq); + rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr); + if (rc) { + dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc); + return IRQ_HANDLED; + } + dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr); + + rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr); + if (rc) + dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n", + rc); + + return IRQ_HANDLED; +} + + +static int irq_alloc_range(struct cxl *adapter, int len, int *irq) +{ + int i, n; + struct irq_avail *cur; + + for (i = 0; i < adapter->guest->irq_nranges; i++) { + cur = &adapter->guest->irq_avail[i]; + n = bitmap_find_next_zero_area(cur->bitmap, cur->range, + 0, len, 0); + if (n < cur->range) { + bitmap_set(cur->bitmap, n, len); + *irq = cur->offset + n; + pr_devel("guest: allocate IRQs %#x->%#x\n", + *irq, *irq + len - 1); + + return 0; + } + } + return -ENOSPC; +} + +static int irq_free_range(struct cxl *adapter, int irq, int len) +{ + int i, n; + struct irq_avail *cur; + + if (len == 0) + return -ENOENT; + + for (i = 0; i < adapter->guest->irq_nranges; i++) { + cur = &adapter->guest->irq_avail[i]; + if (irq >= cur->offset && + (irq + len) <= (cur->offset + cur->range)) { + n = irq - cur->offset; + bitmap_clear(cur->bitmap, n, len); + pr_devel("guest: release IRQs %#x->%#x\n", + irq, irq + len - 1); + return 0; + } + } + return -ENOENT; +} + +static int guest_reset(struct cxl *adapter) +{ + struct cxl_afu *afu = NULL; + int i, rc; + + pr_devel("Adapter reset request\n"); + for (i = 0; i < adapter->slices; i++) { + if ((afu = adapter->afu[i])) { + 
pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, + pci_channel_io_frozen); + cxl_context_detach_all(afu); + } + } + + rc = cxl_h_reset_adapter(adapter->guest->handle); + for (i = 0; i < adapter->slices; i++) { + if (!rc && (afu = adapter->afu[i])) { + pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, + pci_channel_io_normal); + pci_error_handlers(afu, CXL_RESUME_EVENT, 0); + } + } + return rc; +} + +static int guest_alloc_one_irq(struct cxl *adapter) +{ + int irq; + + spin_lock(&adapter->guest->irq_alloc_lock); + if (irq_alloc_range(adapter, 1, &irq)) + irq = -ENOSPC; + spin_unlock(&adapter->guest->irq_alloc_lock); + return irq; +} + +static void guest_release_one_irq(struct cxl *adapter, int irq) +{ + spin_lock(&adapter->guest->irq_alloc_lock); + irq_free_range(adapter, irq, 1); + spin_unlock(&adapter->guest->irq_alloc_lock); +} + +static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs, + struct cxl *adapter, unsigned int num) +{ + int i, try, irq; + + memset(irqs, 0, sizeof(struct cxl_irq_ranges)); + + spin_lock(&adapter->guest->irq_alloc_lock); + for (i = 0; i < CXL_IRQ_RANGES && num; i++) { + try = num; + while (try) { + if (irq_alloc_range(adapter, try, &irq) == 0) + break; + try /= 2; + } + if (!try) + goto error; + irqs->offset[i] = irq; + irqs->range[i] = try; + num -= try; + } + if (num) + goto error; + spin_unlock(&adapter->guest->irq_alloc_lock); + return 0; + +error: + for (i = 0; i < CXL_IRQ_RANGES; i++) + irq_free_range(adapter, irqs->offset[i], irqs->range[i]); + spin_unlock(&adapter->guest->irq_alloc_lock); + return -ENOSPC; +} + +static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs, + struct cxl *adapter) +{ + int i; + + spin_lock(&adapter->guest->irq_alloc_lock); + for (i = 0; i < CXL_IRQ_RANGES; i++) + irq_free_range(adapter, irqs->offset[i], irqs->range[i]); + spin_unlock(&adapter->guest->irq_alloc_lock); +} + +static int guest_register_serr_irq(struct cxl_afu *afu) +{ + afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", + dev_name(&afu->dev)); + if (!afu->err_irq_name) + return -ENOMEM; + + if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq, + guest_slice_irq_err, afu, afu->err_irq_name))) { + kfree(afu->err_irq_name); + afu->err_irq_name = NULL; + return -ENOMEM; + } + + return 0; +} + +static void guest_release_serr_irq(struct cxl_afu *afu) +{ + cxl_unmap_irq(afu->serr_virq, afu); + cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); + kfree(afu->err_irq_name); +} + +static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) +{ + return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token, + tfc >> 32, (psl_reset_mask != 0)); +} + +static void disable_afu_irqs(struct cxl_context *ctx) +{ + irq_hw_number_t hwirq; + unsigned int virq; + int r, i; + + pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice); + for (r = 0; r < CXL_IRQ_RANGES; r++) { + hwirq = ctx->irqs.offset[r]; + for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { + virq = irq_find_mapping(NULL, hwirq); + disable_irq(virq); + } + } +} + +static void enable_afu_irqs(struct cxl_context *ctx) +{ + irq_hw_number_t hwirq; + unsigned int virq; + int r, i; + + pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice); + for (r = 0; r < CXL_IRQ_RANGES; r++) { + hwirq = ctx->irqs.offset[r]; + for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { + virq = irq_find_mapping(NULL, hwirq); + enable_irq(virq); + } + } +} + +static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx, + u64 offset, u64 *val) +{ + unsigned long cr; + 
char c; + int rc = 0; + + if (afu->crs_len < sz) + return -ENOENT; + + if (unlikely(offset >= afu->crs_len)) + return -ERANGE; + + cr = get_zeroed_page(GFP_KERNEL); + if (!cr) + return -ENOMEM; + + rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset, + virt_to_phys((void *)cr), sz); + if (rc) + goto err; + + switch (sz) { + case 1: + c = *((char *) cr); + *val = c; + break; + case 2: + *val = in_le16((u16 *)cr); + break; + case 4: + *val = in_le32((unsigned *)cr); + break; + case 8: + *val = in_le64((u64 *)cr); + break; + default: + WARN_ON(1); + } +err: + free_page(cr); + return rc; +} + +static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset, + u32 *out) +{ + int rc; + u64 val; + + rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val); + if (!rc) + *out = (u32) val; + return rc; +} + +static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset, + u16 *out) +{ + int rc; + u64 val; + + rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val); + if (!rc) + *out = (u16) val; + return rc; +} + +static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset, + u8 *out) +{ + int rc; + u64 val; + + rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val); + if (!rc) + *out = (u8) val; + return rc; +} + +static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset, + u64 *out) +{ + return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out); +} + +static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in) +{ + /* config record is not writable from guest */ + return -EPERM; +} + +static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in) +{ + /* config record is not writable from guest */ + return -EPERM; +} + +static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) +{ + /* config record is not writable from guest */ + return -EPERM; +} + +static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) +{ + struct cxl_process_element_hcall *elem; + struct cxl *adapter = ctx->afu->adapter; + const struct cred *cred; + u32 pid, idx; + int rc, r, i; + u64 mmio_addr, mmio_size; + __be64 flags = 0; + + /* Must be 8 byte aligned and cannot cross a 4096 byte boundary */ + if (!(elem = (struct cxl_process_element_hcall *) + get_zeroed_page(GFP_KERNEL))) + return -ENOMEM; + + elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION); + if (ctx->kernel) { + pid = 0; + flags |= CXL_PE_TRANSLATION_ENABLED; + flags |= CXL_PE_PRIVILEGED_PROCESS; + if (mfmsr() & MSR_SF) + flags |= CXL_PE_64_BIT; + } else { + pid = current->pid; + flags |= CXL_PE_PROBLEM_STATE; + flags |= CXL_PE_TRANSLATION_ENABLED; + if (!test_tsk_thread_flag(current, TIF_32BIT)) + flags |= CXL_PE_64_BIT; + cred = get_current_cred(); + if (uid_eq(cred->euid, GLOBAL_ROOT_UID)) + flags |= CXL_PE_PRIVILEGED_PROCESS; + put_cred(cred); + } + elem->flags = cpu_to_be64(flags); + elem->common.tid = cpu_to_be32(0); /* Unused */ + elem->common.pid = cpu_to_be32(pid); + elem->common.csrp = cpu_to_be64(0); /* disable */ + elem->common.aurp0 = cpu_to_be64(0); /* disable */ + elem->common.aurp1 = cpu_to_be64(0); /* disable */ + + cxl_prefault(ctx, wed); + + elem->common.sstp0 = cpu_to_be64(ctx->sstp0); + elem->common.sstp1 = cpu_to_be64(ctx->sstp1); + for (r = 0; r < CXL_IRQ_RANGES; r++) { + for (i = 0; i < ctx->irqs.range[r]; i++) { + if (r == 0 && i == 0) { + elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]); + } else { + idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset; + elem->applicationVirtualIsnBitmap[idx 
/ 8] |= 0x80 >> (idx % 8); + } + } + } + elem->common.amr = cpu_to_be64(amr); + elem->common.wed = cpu_to_be64(wed); + + disable_afu_irqs(ctx); + + rc = cxl_h_attach_process(ctx->afu->guest->handle, elem, + &ctx->process_token, &mmio_addr, &mmio_size); + if (rc == H_SUCCESS) { + if (ctx->master || !ctx->afu->pp_psa) { + ctx->psn_phys = ctx->afu->psn_phys; + ctx->psn_size = ctx->afu->adapter->ps_size; + } else { + ctx->psn_phys = mmio_addr; + ctx->psn_size = mmio_size; + } + if (ctx->afu->pp_psa && mmio_size && + ctx->afu->pp_size == 0) { + /* + * There's no property in the device tree to read the + * pp_size. We only find out at the 1st attach. + * Compared to bare-metal, it is too late and we + * should really lock here. However, on powerVM, + * pp_size is really only used to display in /sys. + * Being discussed with pHyp for their next release. + */ + ctx->afu->pp_size = mmio_size; + } + /* from PAPR: process element is bytes 4-7 of process token */ + ctx->external_pe = ctx->process_token & 0xFFFFFFFF; + pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx", + ctx->pe, ctx->external_pe, ctx->psn_size); + ctx->pe_inserted = true; + enable_afu_irqs(ctx); + } + + free_page((u64)elem); + return rc; +} + +static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr) +{ + pr_devel("in %s\n", __func__); + + ctx->kernel = kernel; + if (ctx->afu->current_mode == CXL_MODE_DIRECTED) + return attach_afu_directed(ctx, wed, amr); + + /* dedicated mode not supported on FW840 */ + + return -EINVAL; +} + +static int detach_afu_directed(struct cxl_context *ctx) +{ + if (!ctx->pe_inserted) + return 0; + if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token)) + return -1; + return 0; +} + +static int guest_detach_process(struct cxl_context *ctx) +{ + pr_devel("in %s\n", __func__); + trace_cxl_detach(ctx); + + if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) + return -EIO; + + if (ctx->afu->current_mode == CXL_MODE_DIRECTED) + return detach_afu_directed(ctx); + + return -EINVAL; +} + +static void guest_release_afu(struct device *dev) +{ + struct cxl_afu *afu = to_cxl_afu(dev); + + pr_devel("%s\n", __func__); + + idr_destroy(&afu->contexts_idr); + + kfree(afu->guest); + kfree(afu); +} + +ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len) +{ + return guest_collect_vpd(NULL, afu, buf, len); +} + +#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE +static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf, + loff_t off, size_t count) +{ + void *tbuf = NULL; + int rc = 0; + + tbuf = (void *) get_zeroed_page(GFP_KERNEL); + if (!tbuf) + return -ENOMEM; + + rc = cxl_h_get_afu_err(afu->guest->handle, + off & 0x7, + virt_to_phys(tbuf), + count); + if (rc) + goto err; + + if (count > ERR_BUFF_MAX_COPY_SIZE) + count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7); + memcpy(buf, tbuf, count); +err: + free_page((u64)tbuf); + + return rc; +} + +static int guest_afu_check_and_enable(struct cxl_afu *afu) +{ + return 0; +} + +static bool guest_support_attributes(const char *attr_name, + enum cxl_attrs type) +{ + switch (type) { + case CXL_ADAPTER_ATTRS: + if ((strcmp(attr_name, "base_image") == 0) || + (strcmp(attr_name, "load_image_on_perst") == 0) || + (strcmp(attr_name, "perst_reloads_same_image") == 0) || + (strcmp(attr_name, "image_loaded") == 0)) + return false; + break; + case CXL_AFU_MASTER_ATTRS: + if ((strcmp(attr_name, "pp_mmio_off") == 0)) + return false; + break; + case CXL_AFU_ATTRS: + break; + default: + break; + } + + return true; +} + 
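One detail of attach_afu_directed() above that is easy to misread: the AFU interrupt bitmap handed to pHyp is indexed PAPR-style, MSB-first within each byte, which is what the idx / 8 and 0x80 >> (idx % 8) arithmetic encodes. A minimal standalone sketch of that bit-set operation in plain C (isn_bitmap_set is an illustrative name, not a function from this patch):

	#include <stdint.h>

	/* Set bit 'idx' in a PAPR-style bitmap: bit 0 is the MSB of byte 0. */
	static void isn_bitmap_set(uint8_t *bitmap, unsigned int idx)
	{
		bitmap[idx / 8] |= 0x80 >> (idx % 8);
	}

	/* isn_bitmap_set(bm, 0) sets byte 0 to 0x80; isn_bitmap_set(bm, 9) sets byte 1 to 0x40. */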
+static int activate_afu_directed(struct cxl_afu *afu) +{ + int rc; + + dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice); + + afu->current_mode = CXL_MODE_DIRECTED; + + afu->num_procs = afu->max_procs_virtualised; + + if ((rc = cxl_chardev_m_afu_add(afu))) + return rc; + + if ((rc = cxl_sysfs_afu_m_add(afu))) + goto err; + + if ((rc = cxl_chardev_s_afu_add(afu))) + goto err1; + + return 0; +err1: + cxl_sysfs_afu_m_remove(afu); +err: + cxl_chardev_afu_remove(afu); + return rc; +} + +static int guest_afu_activate_mode(struct cxl_afu *afu, int mode) +{ + if (!mode) + return 0; + if (!(mode & afu->modes_supported)) + return -EINVAL; + + if (mode == CXL_MODE_DIRECTED) + return activate_afu_directed(afu); + + if (mode == CXL_MODE_DEDICATED) + dev_err(&afu->dev, "Dedicated mode not supported\n"); + + return -EINVAL; +} + +static int deactivate_afu_directed(struct cxl_afu *afu) +{ + dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice); + + afu->current_mode = 0; + afu->num_procs = 0; + + cxl_sysfs_afu_m_remove(afu); + cxl_chardev_afu_remove(afu); + + cxl_ops->afu_reset(afu); + + return 0; +} + +static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode) +{ + if (!mode) + return 0; + if (!(mode & afu->modes_supported)) + return -EINVAL; + + if (mode == CXL_MODE_DIRECTED) + return deactivate_afu_directed(afu); + return 0; +} + +static int guest_afu_reset(struct cxl_afu *afu) +{ + pr_devel("AFU(%d) reset request\n", afu->slice); + return cxl_h_reset_afu(afu->guest->handle); +} + +static int guest_map_slice_regs(struct cxl_afu *afu) +{ + if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) { + dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n", + afu->slice); + return -ENOMEM; + } + return 0; +} + +static void guest_unmap_slice_regs(struct cxl_afu *afu) +{ + if (afu->p2n_mmio) + iounmap(afu->p2n_mmio); +} + +static int afu_update_state(struct cxl_afu *afu) +{ + int rc, cur_state; + + rc = afu_read_error_state(afu, &cur_state); + if (rc) + return rc; + + if (afu->guest->previous_state == cur_state) + return 0; + + pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state); + + switch (cur_state) { + case H_STATE_NORMAL: + afu->guest->previous_state = cur_state; + rc = 1; + break; + + case H_STATE_DISABLE: + pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, + pci_channel_io_frozen); + + cxl_context_detach_all(afu); + if ((rc = cxl_ops->afu_reset(afu))) + pr_devel("reset hcall failed %d\n", rc); + + rc = afu_read_error_state(afu, &cur_state); + if (!rc && cur_state == H_STATE_NORMAL) { + pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, + pci_channel_io_normal); + pci_error_handlers(afu, CXL_RESUME_EVENT, 0); + rc = 1; + } + afu->guest->previous_state = 0; + break; + + case H_STATE_TEMP_UNAVAILABLE: + afu->guest->previous_state = cur_state; + break; + + case H_STATE_PERM_UNAVAILABLE: + dev_err(&afu->dev, "AFU is in permanent error state\n"); + pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, + pci_channel_io_perm_failure); + afu->guest->previous_state = cur_state; + break; + + default: + pr_err("Unexpected AFU(%d) error state: %#x\n", + afu->slice, cur_state); + return -EINVAL; + } + + return rc; +} + +static int afu_do_recovery(struct cxl_afu *afu) +{ + int rc; + + /* many threads can arrive here, in case of detach_all for example. 
+ * Only one needs to drive the recovery + */ + if (mutex_trylock(&afu->guest->recovery_lock)) { + rc = afu_update_state(afu); + mutex_unlock(&afu->guest->recovery_lock); + return rc; + } + return 0; +} + +static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu) +{ + int state; + + if (afu) { + if (afu_read_error_state(afu, &state) || + state != H_STATE_NORMAL) { + if (afu_do_recovery(afu) > 0) { + /* check again in case we've just fixed it */ + if (!afu_read_error_state(afu, &state) && + state == H_STATE_NORMAL) + return true; + } + return false; + } + } + + return true; +} + +static int afu_properties_look_ok(struct cxl_afu *afu) +{ + if (afu->pp_irqs < 0) { + dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n"); + return -EINVAL; + } + + if (afu->max_procs_virtualised < 1) { + dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n"); + return -EINVAL; + } + + if (afu->crs_len < 0) { + dev_err(&afu->dev, "Unexpected configuration record size value\n"); + return -EINVAL; + } + + return 0; +} + +int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np) +{ + struct cxl_afu *afu; + bool free = true; + int rc; + + pr_devel("in %s - AFU(%d)\n", __func__, slice); + if (!(afu = cxl_alloc_afu(adapter, slice))) + return -ENOMEM; + + if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) { + kfree(afu); + return -ENOMEM; + } + + mutex_init(&afu->guest->recovery_lock); + + if ((rc = dev_set_name(&afu->dev, "afu%i.%i", + adapter->adapter_num, + slice))) + goto err1; + + adapter->slices++; + + if ((rc = cxl_of_read_afu_handle(afu, afu_np))) + goto err1; + + if ((rc = cxl_ops->afu_reset(afu))) + goto err1; + + if ((rc = cxl_of_read_afu_properties(afu, afu_np))) + goto err1; + + if ((rc = afu_properties_look_ok(afu))) + goto err1; + + if ((rc = guest_map_slice_regs(afu))) + goto err1; + + if ((rc = guest_register_serr_irq(afu))) + goto err2; + + /* + * After we call this function we must not free the afu directly, even + * if it returns an error! + */ + if ((rc = cxl_register_afu(afu))) + goto err_put1; + + if ((rc = cxl_sysfs_afu_add(afu))) + goto err_put1; + + /* + * pHyp doesn't expose the programming models supported by the + * AFU. pHyp currently only supports directed mode. If it adds + * dedicated mode later, this version of cxl has no way to + * detect it. So we'll initialize the driver, but the first + * attach will fail. 
+ * Being discussed with pHyp to do better (likely new property) + */ + if (afu->max_procs_virtualised == 1) + afu->modes_supported = CXL_MODE_DEDICATED; + else + afu->modes_supported = CXL_MODE_DIRECTED; + + if ((rc = cxl_afu_select_best_mode(afu))) + goto err_put2; + + adapter->afu[afu->slice] = afu; + + afu->enabled = true; + + if ((rc = cxl_pci_vphb_add(afu))) + dev_info(&afu->dev, "Can't register vPHB\n"); + + return 0; + +err_put2: + cxl_sysfs_afu_remove(afu); +err_put1: + device_unregister(&afu->dev); + free = false; + guest_release_serr_irq(afu); +err2: + guest_unmap_slice_regs(afu); +err1: + if (free) { + kfree(afu->guest); + kfree(afu); + } + return rc; +} + +void cxl_guest_remove_afu(struct cxl_afu *afu) +{ + pr_devel("in %s - AFU(%d)\n", __func__, afu->slice); + + if (!afu) + return; + + cxl_pci_vphb_remove(afu); + cxl_sysfs_afu_remove(afu); + + spin_lock(&afu->adapter->afu_list_lock); + afu->adapter->afu[afu->slice] = NULL; + spin_unlock(&afu->adapter->afu_list_lock); + + cxl_context_detach_all(afu); + cxl_ops->afu_deactivate_mode(afu, afu->current_mode); + guest_release_serr_irq(afu); + guest_unmap_slice_regs(afu); + + device_unregister(&afu->dev); +} + +static void free_adapter(struct cxl *adapter) +{ + struct irq_avail *cur; + int i; + + if (adapter->guest->irq_avail) { + for (i = 0; i < adapter->guest->irq_nranges; i++) { + cur = &adapter->guest->irq_avail[i]; + kfree(cur->bitmap); + } + kfree(adapter->guest->irq_avail); + } + kfree(adapter->guest->status); + cxl_remove_adapter_nr(adapter); + kfree(adapter->guest); + kfree(adapter); +} + +static int properties_look_ok(struct cxl *adapter) +{ + /* The absence of this property means that the operational + * status is unknown or okay + */ + if (strlen(adapter->guest->status) && + strcmp(adapter->guest->status, "okay")) { + pr_err("ABORTING:Bad operational status of the device\n"); + return -EINVAL; + } + + return 0; +} + +ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len) +{ + return guest_collect_vpd(adapter, NULL, buf, len); +} + +void cxl_guest_remove_adapter(struct cxl *adapter) +{ + pr_devel("in %s\n", __func__); + + cxl_sysfs_adapter_remove(adapter); + + cxl_guest_remove_chardev(adapter); + device_unregister(&adapter->dev); +} + +static void release_adapter(struct device *dev) +{ + free_adapter(to_cxl_adapter(dev)); +} + +struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev) +{ + struct cxl *adapter; + bool free = true; + int rc; + + if (!(adapter = cxl_alloc_adapter())) + return ERR_PTR(-ENOMEM); + + if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) { + free_adapter(adapter); + return ERR_PTR(-ENOMEM); + } + + adapter->slices = 0; + adapter->guest->pdev = pdev; + adapter->dev.parent = &pdev->dev; + adapter->dev.release = release_adapter; + dev_set_drvdata(&pdev->dev, adapter); + + if ((rc = cxl_of_read_adapter_handle(adapter, np))) + goto err1; + + if ((rc = cxl_of_read_adapter_properties(adapter, np))) + goto err1; + + if ((rc = properties_look_ok(adapter))) + goto err1; + + if ((rc = cxl_guest_add_chardev(adapter))) + goto err1; + + /* + * After we call this function we must not free the adapter directly, + * even if it returns an error! 
+ */ + if ((rc = cxl_register_adapter(adapter))) + goto err_put1; + + if ((rc = cxl_sysfs_adapter_add(adapter))) + goto err_put1; + + return adapter; + +err_put1: + device_unregister(&adapter->dev); + free = false; + cxl_guest_remove_chardev(adapter); +err1: + if (free) + free_adapter(adapter); + return ERR_PTR(rc); +} + +void cxl_guest_reload_module(struct cxl *adapter) +{ + struct platform_device *pdev; + + pdev = adapter->guest->pdev; + cxl_guest_remove_adapter(adapter); + + cxl_of_probe(pdev); +} + +const struct cxl_backend_ops cxl_guest_ops = { + .module = THIS_MODULE, + .adapter_reset = guest_reset, + .alloc_one_irq = guest_alloc_one_irq, + .release_one_irq = guest_release_one_irq, + .alloc_irq_ranges = guest_alloc_irq_ranges, + .release_irq_ranges = guest_release_irq_ranges, + .setup_irq = NULL, + .handle_psl_slice_error = guest_handle_psl_slice_error, + .psl_interrupt = guest_psl_irq, + .ack_irq = guest_ack_irq, + .attach_process = guest_attach_process, + .detach_process = guest_detach_process, + .support_attributes = guest_support_attributes, + .link_ok = guest_link_ok, + .release_afu = guest_release_afu, + .afu_read_err_buffer = guest_afu_read_err_buffer, + .afu_check_and_enable = guest_afu_check_and_enable, + .afu_activate_mode = guest_afu_activate_mode, + .afu_deactivate_mode = guest_afu_deactivate_mode, + .afu_reset = guest_afu_reset, + .afu_cr_read8 = guest_afu_cr_read8, + .afu_cr_read16 = guest_afu_cr_read16, + .afu_cr_read32 = guest_afu_cr_read32, + .afu_cr_read64 = guest_afu_cr_read64, + .afu_cr_write8 = guest_afu_cr_write8, + .afu_cr_write16 = guest_afu_cr_write16, + .afu_cr_write32 = guest_afu_cr_write32, + .read_adapter_vpd = cxl_guest_read_adapter_vpd, +}; diff --git a/drivers/misc/cxl/hcalls.c b/drivers/misc/cxl/hcalls.c new file mode 100644 index 000000000000..d6d11f4056d7 --- /dev/null +++ b/drivers/misc/cxl/hcalls.c @@ -0,0 +1,647 @@ +/* + * Copyright 2015 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + + +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/delay.h> +#include <asm/byteorder.h> +#include "hcalls.h" +#include "trace.h" + +#define CXL_HCALL_TIMEOUT 60000 +#define CXL_HCALL_TIMEOUT_DOWNLOAD 120000 + +#define H_ATTACH_CA_PROCESS 0x344 +#define H_CONTROL_CA_FUNCTION 0x348 +#define H_DETACH_CA_PROCESS 0x34C +#define H_COLLECT_CA_INT_INFO 0x350 +#define H_CONTROL_CA_FAULTS 0x354 +#define H_DOWNLOAD_CA_FUNCTION 0x35C +#define H_DOWNLOAD_CA_FACILITY 0x364 +#define H_CONTROL_CA_FACILITY 0x368 + +#define H_CONTROL_CA_FUNCTION_RESET 1 /* perform a reset */ +#define H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS 2 /* suspend a process from being executed */ +#define H_CONTROL_CA_FUNCTION_RESUME_PROCESS 3 /* resume a process to be executed */ +#define H_CONTROL_CA_FUNCTION_READ_ERR_STATE 4 /* read the error state */ +#define H_CONTROL_CA_FUNCTION_GET_AFU_ERR 5 /* collect the AFU error buffer */ +#define H_CONTROL_CA_FUNCTION_GET_CONFIG 6 /* collect configuration record */ +#define H_CONTROL_CA_FUNCTION_GET_DOWNLOAD_STATE 7 /* query to return download status */ +#define H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS 8 /* terminate the process before completion */ +#define H_CONTROL_CA_FUNCTION_COLLECT_VPD 9 /* collect VPD */ +#define H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT 11 /* read the function-wide error data based on an interrupt */ +#define H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT 12 /* acknowledge function-wide error data based on an interrupt */ +#define H_CONTROL_CA_FUNCTION_GET_ERROR_LOG 13 /* retrieve the Platform Log ID (PLID) of an error log */ + +#define H_CONTROL_CA_FAULTS_RESPOND_PSL 1 +#define H_CONTROL_CA_FAULTS_RESPOND_AFU 2 + +#define H_CONTROL_CA_FACILITY_RESET 1 /* perform a reset */ +#define H_CONTROL_CA_FACILITY_COLLECT_VPD 2 /* collect VPD */ + +#define H_DOWNLOAD_CA_FACILITY_DOWNLOAD 1 /* download adapter image */ +#define H_DOWNLOAD_CA_FACILITY_VALIDATE 2 /* validate adapter image */ + + +#define _CXL_LOOP_HCALL(call, rc, retbuf, fn, ...) \ + { \ + unsigned int delay, total_delay = 0; \ + u64 token = 0; \ + \ + memset(retbuf, 0, sizeof(retbuf)); \ + while (1) { \ + rc = call(fn, retbuf, __VA_ARGS__, token); \ + token = retbuf[0]; \ + if (rc != H_BUSY && !H_IS_LONG_BUSY(rc)) \ + break; \ + \ + if (rc == H_BUSY) \ + delay = 10; \ + else \ + delay = get_longbusy_msecs(rc); \ + \ + total_delay += delay; \ + if (total_delay > CXL_HCALL_TIMEOUT) { \ + WARN(1, "Warning: Giving up waiting for CXL hcall " \ + "%#x after %u msec\n", fn, total_delay); \ + rc = H_BUSY; \ + break; \ + } \ + msleep(delay); \ + } \ + } +#define CXL_H_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall, __VA_ARGS__) +#define CXL_H9_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall9, __VA_ARGS__) + +#define _PRINT_MSG(rc, format, ...) 
\ + { \ + if ((rc != H_SUCCESS) && (rc != H_CONTINUE)) \ + pr_err(format, __VA_ARGS__); \ + else \ + pr_devel(format, __VA_ARGS__); \ + } \ + + +static char *afu_op_names[] = { + "UNKNOWN_OP", /* 0 undefined */ + "RESET", /* 1 */ + "SUSPEND_PROCESS", /* 2 */ + "RESUME_PROCESS", /* 3 */ + "READ_ERR_STATE", /* 4 */ + "GET_AFU_ERR", /* 5 */ + "GET_CONFIG", /* 6 */ + "GET_DOWNLOAD_STATE", /* 7 */ + "TERMINATE_PROCESS", /* 8 */ + "COLLECT_VPD", /* 9 */ + "UNKNOWN_OP", /* 10 undefined */ + "GET_FUNCTION_ERR_INT", /* 11 */ + "ACK_FUNCTION_ERR_INT", /* 12 */ + "GET_ERROR_LOG", /* 13 */ +}; + +static char *control_adapter_op_names[] = { + "UNKNOWN_OP", /* 0 undefined */ + "RESET", /* 1 */ + "COLLECT_VPD", /* 2 */ +}; + +static char *download_op_names[] = { + "UNKNOWN_OP", /* 0 undefined */ + "DOWNLOAD", /* 1 */ + "VALIDATE", /* 2 */ +}; + +static char *op_str(unsigned int op, char *name_array[], int array_len) +{ + if (op >= array_len) + return "UNKNOWN_OP"; + return name_array[op]; +} + +#define OP_STR(op, name_array) op_str(op, name_array, ARRAY_SIZE(name_array)) + +#define OP_STR_AFU(op) OP_STR(op, afu_op_names) +#define OP_STR_CONTROL_ADAPTER(op) OP_STR(op, control_adapter_op_names) +#define OP_STR_DOWNLOAD_ADAPTER(op) OP_STR(op, download_op_names) + + +long cxl_h_attach_process(u64 unit_address, + struct cxl_process_element_hcall *element, + u64 *process_token, u64 *mmio_addr, u64 *mmio_size) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_ATTACH_CA_PROCESS, unit_address, virt_to_phys(element)); + _PRINT_MSG(rc, "cxl_h_attach_process(%#.16llx, %#.16lx): %li\n", + unit_address, virt_to_phys(element), rc); + trace_cxl_hcall_attach(unit_address, virt_to_phys(element), retbuf[0], retbuf[1], retbuf[2], rc); + + pr_devel("token: 0x%.8lx mmio_addr: 0x%lx mmio_size: 0x%lx\nProcess Element Structure:\n", + retbuf[0], retbuf[1], retbuf[2]); + cxl_dump_debug_buffer(element, sizeof(*element)); + + switch (rc) { + case H_SUCCESS: /* The process info is attached to the coherent platform function */ + *process_token = retbuf[0]; + if (mmio_addr) + *mmio_addr = retbuf[1]; + if (mmio_size) + *mmio_size = retbuf[2]; + return 0; + case H_PARAMETER: /* An incorrect parameter was supplied. */ + case H_FUNCTION: /* The function is not supported. */ + return -EINVAL; + case H_AUTHORITY: /* The partition does not have authority to perform this hcall */ + case H_RESOURCE: /* The coherent platform function does not have enough additional resource to attach the process */ + case H_HARDWARE: /* A hardware event prevented the attach operation */ + case H_STATE: /* The coherent platform function is not in a valid state */ + case H_BUSY: + return -EBUSY; + default: + WARN(1, "Unexpected return code: %lx", rc); + return -EINVAL; + } +} + +/** + * cxl_h_detach_process - Detach a process element from a coherent + * platform function. + */ +long cxl_h_detach_process(u64 unit_address, u64 process_token) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_DETACH_CA_PROCESS, unit_address, process_token); + _PRINT_MSG(rc, "cxl_h_detach_process(%#.16llx, 0x%.8llx): %li\n", unit_address, process_token, rc); + trace_cxl_hcall_detach(unit_address, process_token, rc); + + switch (rc) { + case H_SUCCESS: /* The process was detached from the coherent platform function */ + return 0; + case H_PARAMETER: /* An incorrect parameter was supplied. 
*/
+		return -EINVAL;
+	case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+	case H_RESOURCE: /* The function has page table mappings for MMIO */
+	case H_HARDWARE: /* A hardware event prevented the detach operation */
+	case H_STATE: /* The coherent platform function is not in a valid state */
+	case H_BUSY:
+		return -EBUSY;
+	default:
+		WARN(1, "Unexpected return code: %lx", rc);
+		return -EINVAL;
+	}
+}
+
+/**
+ * cxl_h_control_function - This H_CONTROL_CA_FUNCTION hypervisor call allows
+ *                          the partition to manipulate or query
+ *                          certain coherent platform function behaviors.
+ */
+static long cxl_h_control_function(u64 unit_address, u64 op,
+				u64 p1, u64 p2, u64 p3, u64 p4, u64 *out)
+{
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+	long rc;
+
+	CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FUNCTION, unit_address, op, p1, p2, p3, p4);
+	_PRINT_MSG(rc, "cxl_h_control_function(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n",
+		unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc);
+	trace_cxl_hcall_control_function(unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc);
+
+	switch (rc) {
+	case H_SUCCESS: /* The operation is completed for the coherent platform function */
+		if ((op == H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT ||
+			op == H_CONTROL_CA_FUNCTION_READ_ERR_STATE ||
+			op == H_CONTROL_CA_FUNCTION_COLLECT_VPD))
+			*out = retbuf[0];
+		return 0;
+	case H_PARAMETER: /* An incorrect parameter was supplied. */
+	case H_FUNCTION: /* The function is not supported. */
+	case H_NOT_FOUND: /* The operation supplied was not valid */
+	case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */
+	case H_SG_LIST: /* A block list entry was invalid */
+		return -EINVAL;
+	case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+	case H_RESOURCE: /* The function has page table mappings for MMIO */
+	case H_HARDWARE: /* A hardware event prevented the attach operation */
+	case H_STATE: /* The coherent platform function is not in a valid state */
+	case H_BUSY:
+		return -EBUSY;
+	default:
+		WARN(1, "Unexpected return code: %lx", rc);
+		return -EINVAL;
+	}
+}
+
+/**
+ * cxl_h_reset_afu - Perform a reset to the coherent platform function.
+ */
+long cxl_h_reset_afu(u64 unit_address)
+{
+	return cxl_h_control_function(unit_address,
+				H_CONTROL_CA_FUNCTION_RESET,
+				0, 0, 0, 0,
+				NULL);
+}
+
+/**
+ * cxl_h_suspend_process - Suspend a process from being executed
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ *              process was attached.
+ */
+long cxl_h_suspend_process(u64 unit_address, u64 process_token)
+{
+	return cxl_h_control_function(unit_address,
+				H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS,
+				process_token, 0, 0, 0,
+				NULL);
+}
+
+/**
+ * cxl_h_resume_process - Resume a process to be executed
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ *              process was attached.
+ */
+long cxl_h_resume_process(u64 unit_address, u64 process_token)
+{
+	return cxl_h_control_function(unit_address,
+				H_CONTROL_CA_FUNCTION_RESUME_PROCESS,
+				process_token, 0, 0, 0,
+				NULL);
+}
+
+/**
+ * cxl_h_read_error_state - Checks the error state of the coherent
+ *                          platform function.
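+ *                          (one of H_STATE_NORMAL, H_STATE_DISABLE,
+ *                          H_STATE_TEMP_UNAVAILABLE or H_STATE_PERM_UNAVAILABLE)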
+ * R4 contains the error state + */ +long cxl_h_read_error_state(u64 unit_address, u64 *state) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_READ_ERR_STATE, + 0, 0, 0, 0, + state); +} + +/** + * cxl_h_get_afu_err - collect the AFU error buffer + * Parameter1 = byte offset into error buffer to retrieve, valid values + * are between 0 and (ibm,error-buffer-size - 1) + * Parameter2 = 4K aligned real address of error buffer, to be filled in + * Parameter3 = length of error buffer, valid values are 4K or less + */ +long cxl_h_get_afu_err(u64 unit_address, u64 offset, + u64 buf_address, u64 len) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_GET_AFU_ERR, + offset, buf_address, len, 0, + NULL); +} + +/** + * cxl_h_get_config - collect configuration record for the + * coherent platform function + * Parameter1 = # of configuration record to retrieve, valid values are + * between 0 and (ibm,#config-records - 1) + * Parameter2 = byte offset into configuration record to retrieve, + * valid values are between 0 and (ibm,config-record-size - 1) + * Parameter3 = 4K aligned real address of configuration record buffer, + * to be filled in + * Parameter4 = length of configuration buffer, valid values are 4K or less + */ +long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset, + u64 buf_address, u64 len) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_GET_CONFIG, + cr_num, offset, buf_address, len, + NULL); +} + +/** + * cxl_h_terminate_process - Terminate the process before completion + * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when + * process was attached. + */ +long cxl_h_terminate_process(u64 unit_address, u64 process_token) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS, + process_token, 0, 0, 0, + NULL); +} + +/** + * cxl_h_collect_vpd - Collect VPD for the coherent platform function. + * Parameter1 = # of VPD record to retrieve, valid values are between 0 + * and (ibm,#config-records - 1). + * Parameter2 = 4K naturally aligned real buffer containing block + * list entries + * Parameter3 = number of block list entries in the block list, valid + * values are between 0 and 256 + */ +long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address, + u64 num, u64 *out) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_COLLECT_VPD, + record, list_address, num, 0, + out); +} + +/** + * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt + */ +long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT, + 0, 0, 0, 0, reg); +} + +/** + * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data + * based on an interrupt + * Parameter1 = value to write to the function-wide error interrupt register + */ +long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT, + value, 0, 0, 0, + NULL); +} + +/** + * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of + * an error log + */ +long cxl_h_get_error_log(u64 unit_address, u64 value) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_GET_ERROR_LOG, + 0, 0, 0, 0, + NULL); +} + +/** + * cxl_h_collect_int_info - Collect interrupt info about a coherent + * platform function after an interrupt occurred. 
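+ * The info is returned through @info, whose layout matches the nine
+ * return registers of the hcall.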
+ */ +long cxl_h_collect_int_info(u64 unit_address, u64 process_token, + struct cxl_irq_info *info) +{ + long rc; + + BUG_ON(sizeof(*info) != sizeof(unsigned long[PLPAR_HCALL9_BUFSIZE])); + + rc = plpar_hcall9(H_COLLECT_CA_INT_INFO, (unsigned long *) info, + unit_address, process_token); + _PRINT_MSG(rc, "cxl_h_collect_int_info(%#.16llx, 0x%llx): %li\n", + unit_address, process_token, rc); + trace_cxl_hcall_collect_int_info(unit_address, process_token, rc); + + switch (rc) { + case H_SUCCESS: /* The interrupt info is returned in return registers. */ + pr_devel("dsisr:%#llx, dar:%#llx, dsr:%#llx, pid:%u, tid:%u, afu_err:%#llx, errstat:%#llx\n", + info->dsisr, info->dar, info->dsr, info->pid, + info->tid, info->afu_err, info->errstat); + return 0; + case H_PARAMETER: /* An incorrect parameter was supplied. */ + return -EINVAL; + case H_AUTHORITY: /* The partition does not have authority to perform this hcall. */ + case H_HARDWARE: /* A hardware event prevented the collection of the interrupt info.*/ + case H_STATE: /* The coherent platform function is not in a valid state to collect interrupt info. */ + return -EBUSY; + default: + WARN(1, "Unexpected return code: %lx", rc); + return -EINVAL; + } +} + +/** + * cxl_h_control_faults - Control the operation of a coherent platform + * function after a fault occurs. + * + * Parameters + * control-mask: value to control the faults + * looks like PSL_TFC_An shifted >> 32 + * reset-mask: mask to control reset of function faults + * Set reset_mask = 1 to reset PSL errors + */ +long cxl_h_control_faults(u64 unit_address, u64 process_token, + u64 control_mask, u64 reset_mask) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + memset(retbuf, 0, sizeof(retbuf)); + + rc = plpar_hcall(H_CONTROL_CA_FAULTS, retbuf, unit_address, + H_CONTROL_CA_FAULTS_RESPOND_PSL, process_token, + control_mask, reset_mask); + _PRINT_MSG(rc, "cxl_h_control_faults(%#.16llx, 0x%llx, %#llx, %#llx): %li (%#lx)\n", + unit_address, process_token, control_mask, reset_mask, + rc, retbuf[0]); + trace_cxl_hcall_control_faults(unit_address, process_token, + control_mask, reset_mask, retbuf[0], rc); + + switch (rc) { + case H_SUCCESS: /* Faults were successfully controlled for the function. */ + return 0; + case H_PARAMETER: /* An incorrect parameter was supplied. */ + return -EINVAL; + case H_HARDWARE: /* A hardware event prevented the control of faults. */ + case H_STATE: /* The function was in an invalid state. */ + case H_AUTHORITY: /* The partition does not have authority to perform this hcall; the coherent platform facilities may need to be licensed. */ + return -EBUSY; + case H_FUNCTION: /* The function is not supported */ + case H_NOT_FOUND: /* The operation supplied was not valid */ + return -EINVAL; + default: + WARN(1, "Unexpected return code: %lx", rc); + return -EINVAL; + } +} + +/** + * cxl_h_control_facility - This H_CONTROL_CA_FACILITY hypervisor call + * allows the partition to manipulate or query + * certain coherent platform facility behaviors. 
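+ * It is the adapter-level counterpart of cxl_h_control_function().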
+ */
+static long cxl_h_control_facility(u64 unit_address, u64 op,
+				u64 p1, u64 p2, u64 p3, u64 p4, u64 *out)
+{
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+	long rc;
+
+	CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FACILITY, unit_address, op, p1, p2, p3, p4);
+	_PRINT_MSG(rc, "cxl_h_control_facility(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n",
+		unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc);
+	trace_cxl_hcall_control_facility(unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc);
+
+	switch (rc) {
+	case H_SUCCESS: /* The operation is completed for the coherent platform facility */
+		if (op == H_CONTROL_CA_FACILITY_COLLECT_VPD)
+			*out = retbuf[0];
+		return 0;
+	case H_PARAMETER: /* An incorrect parameter was supplied. */
+	case H_FUNCTION: /* The function is not supported. */
+	case H_NOT_FOUND: /* The operation supplied was not valid */
+	case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */
+	case H_SG_LIST: /* A block list entry was invalid */
+		return -EINVAL;
+	case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+	case H_RESOURCE: /* The function has page table mappings for MMIO */
+	case H_HARDWARE: /* A hardware event prevented the attach operation */
+	case H_STATE: /* The coherent platform facility is not in a valid state */
+	case H_BUSY:
+		return -EBUSY;
+	default:
+		WARN(1, "Unexpected return code: %lx", rc);
+		return -EINVAL;
+	}
+}
+
+/**
+ * cxl_h_reset_adapter - Perform a reset to the coherent platform facility.
+ */
+long cxl_h_reset_adapter(u64 unit_address)
+{
+	return cxl_h_control_facility(unit_address,
+				H_CONTROL_CA_FACILITY_RESET,
+				0, 0, 0, 0,
+				NULL);
+}
+
+/**
+ * cxl_h_collect_vpd_adapter - Collect VPD for the coherent platform facility.
+ * Parameter1 = 4K naturally aligned real buffer containing block
+ *              list entries
+ * Parameter2 = number of block list entries in the block list, valid
+ *              values are between 0 and 256
+ */
+long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
+			       u64 num, u64 *out)
+{
+	return cxl_h_control_facility(unit_address,
+				H_CONTROL_CA_FACILITY_COLLECT_VPD,
+				list_address, num, 0, 0,
+				out);
+}
+
+/**
+ * cxl_h_download_facility - This H_DOWNLOAD_CA_FACILITY
+ *                    hypervisor call provides platform support for
+ *                    downloading a base adapter image to the coherent
+ *                    platform facility, and for validating the entire
+ *                    image after the download.
+ * Parameters
+ *    op: operation to perform to the coherent platform function
+ *      Download: operation = 1, the base image in the coherent platform
+ *                facility is first erased, and then
+ *                programmed using the image supplied
+ *                in the scatter/gather list.
+ *      Validate: operation = 2, the base image in the coherent platform
+ *                facility is compared with the image
+ *                supplied in the scatter/gather list.
+ *    list_address: 4K naturally aligned real buffer containing
+ *                  scatter/gather list entries.
+ *    num: number of block list entries in the scatter/gather list.
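+ *    out: on H_CONTINUE, carries the continuation token to pass back
+ *         in on the next call.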
+ */
+static long cxl_h_download_facility(u64 unit_address, u64 op,
+				    u64 list_address, u64 num,
+				    u64 *out)
+{
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+	unsigned int delay, total_delay = 0;
+	u64 token = 0;
+	long rc;
+
+	if (*out != 0)
+		token = *out;
+
+	memset(retbuf, 0, sizeof(retbuf));
+	while (1) {
+		rc = plpar_hcall(H_DOWNLOAD_CA_FACILITY, retbuf,
+				 unit_address, op, list_address, num,
+				 token);
+		token = retbuf[0];
+		if (rc != H_BUSY && !H_IS_LONG_BUSY(rc))
+			break;
+
+		if (rc != H_BUSY) {
+			delay = get_longbusy_msecs(rc);
+			total_delay += delay;
+			if (total_delay > CXL_HCALL_TIMEOUT_DOWNLOAD) {
+				WARN(1, "Warning: Giving up waiting for CXL hcall "
+					"%#x after %u msec\n",
+					H_DOWNLOAD_CA_FACILITY, total_delay);
+				rc = H_BUSY;
+				break;
+			}
+			msleep(delay);
+		}
+	}
+	_PRINT_MSG(rc, "cxl_h_download_facility(%#.16llx, %s(%#llx, %#llx), %#lx): %li\n",
+		 unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc);
+	trace_cxl_hcall_download_facility(unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc);
+
+	switch (rc) {
+	case H_SUCCESS: /* The operation is completed for the coherent platform facility */
+		return 0;
+	case H_PARAMETER: /* An incorrect parameter was supplied */
+	case H_FUNCTION: /* The function is not supported. */
+	case H_SG_LIST: /* A block list entry was invalid */
+	case H_BAD_DATA: /* Image verification failed */
+		return -EINVAL;
+	case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+	case H_RESOURCE: /* The function has page table mappings for MMIO */
+	case H_HARDWARE: /* A hardware event prevented the attach operation */
+	case H_STATE: /* The coherent platform facility is not in a valid state */
+	case H_BUSY:
+		return -EBUSY;
+	case H_CONTINUE:
+		*out = retbuf[0];
+		return 1; /* More data is needed for the complete image */
+	default:
+		WARN(1, "Unexpected return code: %lx", rc);
+		return -EINVAL;
+	}
+}
+
+/**
+ * cxl_h_download_adapter_image - Download the base image to the coherent
+ *                                platform facility.
+ */
+long cxl_h_download_adapter_image(u64 unit_address,
+				  u64 list_address, u64 num,
+				  u64 *out)
+{
+	return cxl_h_download_facility(unit_address,
+				       H_DOWNLOAD_CA_FACILITY_DOWNLOAD,
+				       list_address, num, out);
+}
+
+/**
+ * cxl_h_validate_adapter_image - Validate the base image in the coherent
+ *                                platform facility.
+ */
+long cxl_h_validate_adapter_image(u64 unit_address,
+				  u64 list_address, u64 num,
+				  u64 *out)
+{
+	return cxl_h_download_facility(unit_address,
+				       H_DOWNLOAD_CA_FACILITY_VALIDATE,
+				       list_address, num, out);
+}
diff --git a/drivers/misc/cxl/hcalls.h b/drivers/misc/cxl/hcalls.h
new file mode 100644
index 000000000000..3e25522a5df6
--- /dev/null
+++ b/drivers/misc/cxl/hcalls.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _HCALLS_H
+#define _HCALLS_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/hvcall.h>
+#include "cxl.h"
+
+#define SG_BUFFER_SIZE 4096
+#define SG_MAX_ENTRIES 256
+
+struct sg_list {
+	u64 phys_addr;
+	u64 len;
+};
+
+/*
+ * This is straight out of PAPR, but replacing some of the compound fields with
+ * a single field, where they were identical to the register layout.
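+ * All fields are big-endian: the structure is passed to the hypervisor by
+ * real address (see cxl_h_attach_process()), not copied field by field.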
+ * + * The 'flags' parameter regroups the various bit-fields + */ +#define CXL_PE_CSRP_VALID (1ULL << 63) +#define CXL_PE_PROBLEM_STATE (1ULL << 62) +#define CXL_PE_SECONDARY_SEGMENT_TBL_SRCH (1ULL << 61) +#define CXL_PE_TAGS_ACTIVE (1ULL << 60) +#define CXL_PE_USER_STATE (1ULL << 59) +#define CXL_PE_TRANSLATION_ENABLED (1ULL << 58) +#define CXL_PE_64_BIT (1ULL << 57) +#define CXL_PE_PRIVILEGED_PROCESS (1ULL << 56) + +#define CXL_PROCESS_ELEMENT_VERSION 1 +struct cxl_process_element_hcall { + __be64 version; + __be64 flags; + u8 reserved0[12]; + __be32 pslVirtualIsn; + u8 applicationVirtualIsnBitmap[256]; + u8 reserved1[144]; + struct cxl_process_element_common common; + u8 reserved4[12]; +} __packed; + +#define H_STATE_NORMAL 1 +#define H_STATE_DISABLE 2 +#define H_STATE_TEMP_UNAVAILABLE 3 +#define H_STATE_PERM_UNAVAILABLE 4 + +/* NOTE: element must be a logical real address, and must be pinned */ +long cxl_h_attach_process(u64 unit_address, struct cxl_process_element_hcall *element, + u64 *process_token, u64 *mmio_addr, u64 *mmio_size); + +/** + * cxl_h_detach_process - Detach a process element from a coherent + * platform function. + */ +long cxl_h_detach_process(u64 unit_address, u64 process_token); + +/** + * cxl_h_reset_afu - Perform a reset to the coherent platform function. + */ +long cxl_h_reset_afu(u64 unit_address); + +/** + * cxl_h_suspend_process - Suspend a process from being executed + * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when + * process was attached. + */ +long cxl_h_suspend_process(u64 unit_address, u64 process_token); + +/** + * cxl_h_resume_process - Resume a process to be executed + * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when + * process was attached. + */ +long cxl_h_resume_process(u64 unit_address, u64 process_token); + +/** + * cxl_h_read_error_state - Reads the error state of the coherent + * platform function. + * R4 contains the error state + */ +long cxl_h_read_error_state(u64 unit_address, u64 *state); + +/** + * cxl_h_get_afu_err - collect the AFU error buffer + * Parameter1 = byte offset into error buffer to retrieve, valid values + * are between 0 and (ibm,error-buffer-size - 1) + * Parameter2 = 4K aligned real address of error buffer, to be filled in + * Parameter3 = length of error buffer, valid values are 4K or less + */ +long cxl_h_get_afu_err(u64 unit_address, u64 offset, u64 buf_address, u64 len); + +/** + * cxl_h_get_config - collect configuration record for the + * coherent platform function + * Parameter1 = # of configuration record to retrieve, valid values are + * between 0 and (ibm,#config-records - 1) + * Parameter2 = byte offset into configuration record to retrieve, + * valid values are between 0 and (ibm,config-record-size - 1) + * Parameter3 = 4K aligned real address of configuration record buffer, + * to be filled in + * Parameter4 = length of configuration buffer, valid values are 4K or less + */ +long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset, + u64 buf_address, u64 len); + +/** + * cxl_h_terminate_process - Terminate the process before completion + * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when + * process was attached. + */ +long cxl_h_terminate_process(u64 unit_address, u64 process_token); + +/** + * cxl_h_collect_vpd - Collect VPD for the coherent platform function. + * Parameter1 = # of VPD record to retrieve, valid values are between 0 + * and (ibm,#config-records - 1). 
+ * Parameter2 = 4K naturally aligned real buffer containing block
+ *              list entries
+ * Parameter3 = number of block list entries in the block list, valid
+ *              values are between 0 and 256
+ */
+long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address,
+		       u64 num, u64 *out);
+
+/**
+ * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt
+ */
+long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg);
+
+/**
+ * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data
+ *                                based on an interrupt
+ * Parameter1 = value to write to the function-wide error interrupt register
+ */
+long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value);
+
+/**
+ * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of
+ *                       an error log
+ */
+long cxl_h_get_error_log(u64 unit_address, u64 value);
+
+/**
+ * cxl_h_collect_int_info - Collect interrupt info about a coherent
+ *                          platform function after an interrupt occurred.
+ */
+long cxl_h_collect_int_info(u64 unit_address, u64 process_token,
+			    struct cxl_irq_info *info);
+
+/**
+ * cxl_h_control_faults - Control the operation of a coherent platform
+ *                        function after a fault occurs.
+ *
+ * Parameters
+ *    control-mask: value to control the faults
+ *                  looks like PSL_TFC_An shifted >> 32
+ *    reset-mask: mask to control reset of function faults
+ *                Set reset_mask = 1 to reset PSL errors
+ */
+long cxl_h_control_faults(u64 unit_address, u64 process_token,
+			  u64 control_mask, u64 reset_mask);
+
+/**
+ * cxl_h_reset_adapter - Perform a reset to the coherent platform facility.
+ */
+long cxl_h_reset_adapter(u64 unit_address);
+
+/**
+ * cxl_h_collect_vpd_adapter - Collect VPD for the coherent platform facility.
+ * Parameter1 = 4K naturally aligned real buffer containing block
+ *              list entries
+ * Parameter2 = number of block list entries in the block list, valid
+ *              values are between 0 and 256
+ */
+long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
+			       u64 num, u64 *out);
+
+/**
+ * cxl_h_download_adapter_image - Download the base image to the coherent
+ *                                platform facility.
+ */
+long cxl_h_download_adapter_image(u64 unit_address,
+				  u64 list_address, u64 num,
+				  u64 *out);
+
+/**
+ * cxl_h_validate_adapter_image - Validate the base image in the coherent
+ *                                platform facility.
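+ * Validation uses the same H_DOWNLOAD_CA_FACILITY hcall and the same
+ * scatter/gather list format as the download.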
+ */ +long cxl_h_validate_adapter_image(u64 unit_address, + u64 list_address, u64 num, + u64 *out); +#endif /* _HCALLS_H */ diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c index 09a406058c46..be646dc41a2c 100644 --- a/drivers/misc/cxl/irq.c +++ b/drivers/misc/cxl/irq.c @@ -19,70 +19,11 @@ #include "cxl.h" #include "trace.h" -/* XXX: This is implementation specific */ -static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat) +static int afu_irq_range_start(void) { - u64 fir1, fir2, fir_slice, serr, afu_debug; - - fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1); - fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2); - fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An); - serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); - afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An); - - dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat); - dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1); - dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2); - dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); - dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); - dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); - - dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n"); - cxl_stop_trace(ctx->afu->adapter); - - return cxl_ack_irq(ctx, 0, errstat); -} - -irqreturn_t cxl_slice_irq_err(int irq, void *data) -{ - struct cxl_afu *afu = data; - u64 fir_slice, errstat, serr, afu_debug; - - WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq); - - serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); - fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An); - errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); - afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An); - dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); - dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); - dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat); - dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); - - cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); - - return IRQ_HANDLED; -} - -static irqreturn_t cxl_irq_err(int irq, void *data) -{ - struct cxl *adapter = data; - u64 fir1, fir2, err_ivte; - - WARN(1, "CXL ERROR interrupt %i\n", irq); - - err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE); - dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte); - - dev_crit(&adapter->dev, "STOPPING CXL TRACE\n"); - cxl_stop_trace(adapter); - - fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1); - fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2); - - dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2); - - return IRQ_HANDLED; + if (cpu_has_feature(CPU_FTR_HVMODE)) + return 1; + return 0; } static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar) @@ -93,9 +34,8 @@ static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 da return IRQ_HANDLED; } -static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info) +irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info) { - struct cxl_context *ctx = data; u64 dsisr, dar; dsisr = irq_info->dsisr; @@ -145,7 +85,8 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info) if (dsisr & CXL_PSL_DSISR_An_UR) pr_devel("CXL interrupt: AURP PTE not found\n"); if (dsisr & CXL_PSL_DSISR_An_PE) - return handle_psl_slice_error(ctx, dsisr, irq_info->errstat); + return cxl_ops->handle_psl_slice_error(ctx, dsisr, + irq_info->errstat); if (dsisr & 
CXL_PSL_DSISR_An_AE) {
 		pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);
@@ -169,7 +110,7 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
 			wake_up_all(&ctx->wq);
 		}
 
-		cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
+		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
 		return IRQ_HANDLED;
 	}
 	if (dsisr & CXL_PSL_DSISR_An_OC)
@@ -179,54 +120,27 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
-{
-	if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
-		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
-	else
-		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
-
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
-{
-	struct cxl_afu *afu = data;
-	struct cxl_context *ctx;
-	struct cxl_irq_info irq_info;
-	int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
-	int ret;
-
-	if ((ret = cxl_get_irq(afu, &irq_info))) {
-		WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
-		return fail_psl_irq(afu, &irq_info);
-	}
-
-	rcu_read_lock();
-	ctx = idr_find(&afu->contexts_idr, ph);
-	if (ctx) {
-		ret = cxl_irq(irq, ctx, &irq_info);
-		rcu_read_unlock();
-		return ret;
-	}
-	rcu_read_unlock();
-
-	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
-		" %016llx\n(Possible AFU HW issue - was a term/remove acked"
-		" with outstanding transactions?)\n", ph, irq_info.dsisr,
-		irq_info.dar);
-	return fail_psl_irq(afu, &irq_info);
-}
-
 static irqreturn_t cxl_irq_afu(int irq, void *data)
 {
 	struct cxl_context *ctx = data;
 	irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
-	int irq_off, afu_irq = 1;
+	int irq_off, afu_irq = 0;
 	__u16 range;
 	int r;
 
-	for (r = 1; r < CXL_IRQ_RANGES; r++) {
+	/*
+	 * Look for the interrupt number.
+	 * On bare-metal, we know range 0 only contains the PSL
+	 * interrupt so we could start counting at range 1 and initialize
+	 * afu_irq at 1.
+	 * In a guest, range 0 also contains AFU interrupts, so it must
+	 * be accounted for. Therefore we initialize afu_irq at 0 to take
+	 * into account the PSL interrupt.
+	 *
	 * For code readability, it just seems easier to go over all
	 * the ranges on bare-metal and guest. The end result is the same.
+ */ + for (r = 0; r < CXL_IRQ_RANGES; r++) { irq_off = hwirq - ctx->irqs.offset[r]; range = ctx->irqs.range[r]; if (irq_off >= 0 && irq_off < range) { @@ -236,7 +150,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data) afu_irq += range; } if (unlikely(r >= CXL_IRQ_RANGES)) { - WARN(1, "Recieved AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n", + WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n", ctx->pe, irq, hwirq); return IRQ_HANDLED; } @@ -246,7 +160,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data) afu_irq, ctx->pe, irq, hwirq); if (unlikely(!ctx->irq_bitmap)) { - WARN(1, "Recieved AFU IRQ for context with no IRQ bitmap\n"); + WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n"); return IRQ_HANDLED; } spin_lock(&ctx->lock); @@ -272,7 +186,8 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq, return 0; } - cxl_setup_irq(adapter, hwirq, virq); + if (cxl_ops->setup_irq) + cxl_ops->setup_irq(adapter, hwirq, virq); pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq); @@ -291,16 +206,16 @@ void cxl_unmap_irq(unsigned int virq, void *cookie) irq_dispose_mapping(virq); } -static int cxl_register_one_irq(struct cxl *adapter, - irq_handler_t handler, - void *cookie, - irq_hw_number_t *dest_hwirq, - unsigned int *dest_virq, - const char *name) +int cxl_register_one_irq(struct cxl *adapter, + irq_handler_t handler, + void *cookie, + irq_hw_number_t *dest_hwirq, + unsigned int *dest_virq, + const char *name) { int hwirq, virq; - if ((hwirq = cxl_alloc_one_irq(adapter)) < 0) + if ((hwirq = cxl_ops->alloc_one_irq(adapter)) < 0) return hwirq; if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name))) @@ -312,108 +227,10 @@ static int cxl_register_one_irq(struct cxl *adapter, return 0; err: - cxl_release_one_irq(adapter, hwirq); + cxl_ops->release_one_irq(adapter, hwirq); return -ENOMEM; } -int cxl_register_psl_err_irq(struct cxl *adapter) -{ - int rc; - - adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", - dev_name(&adapter->dev)); - if (!adapter->irq_name) - return -ENOMEM; - - if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter, - &adapter->err_hwirq, - &adapter->err_virq, - adapter->irq_name))) { - kfree(adapter->irq_name); - adapter->irq_name = NULL; - return rc; - } - - cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff); - - return 0; -} - -void cxl_release_psl_err_irq(struct cxl *adapter) -{ - if (adapter->err_virq != irq_find_mapping(NULL, adapter->err_hwirq)) - return; - - cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); - cxl_unmap_irq(adapter->err_virq, adapter); - cxl_release_one_irq(adapter, adapter->err_hwirq); - kfree(adapter->irq_name); -} - -int cxl_register_serr_irq(struct cxl_afu *afu) -{ - u64 serr; - int rc; - - afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", - dev_name(&afu->dev)); - if (!afu->err_irq_name) - return -ENOMEM; - - if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu, - &afu->serr_hwirq, - &afu->serr_virq, afu->err_irq_name))) { - kfree(afu->err_irq_name); - afu->err_irq_name = NULL; - return rc; - } - - serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); - serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff); - cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); - - return 0; -} - -void cxl_release_serr_irq(struct cxl_afu *afu) -{ - if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) - return; - - cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); - cxl_unmap_irq(afu->serr_virq, afu); - 
cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
-	kfree(afu->err_irq_name);
-}
-
-int cxl_register_psl_irq(struct cxl_afu *afu)
-{
-	int rc;
-
-	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
-				      dev_name(&afu->dev));
-	if (!afu->psl_irq_name)
-		return -ENOMEM;
-
-	if ((rc = cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
-				    &afu->psl_hwirq, &afu->psl_virq,
-				    afu->psl_irq_name))) {
-		kfree(afu->psl_irq_name);
-		afu->psl_irq_name = NULL;
-	}
-	return rc;
-}
-
-void cxl_release_psl_irq(struct cxl_afu *afu)
-{
-	if (afu->psl_virq != irq_find_mapping(NULL, afu->psl_hwirq))
-		return;
-
-	cxl_unmap_irq(afu->psl_virq, afu);
-	cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
-	kfree(afu->psl_irq_name);
-}
-
 void afu_irq_name_free(struct cxl_context *ctx)
 {
 	struct cxl_irq_name *irq_name, *tmp;
@@ -429,16 +246,33 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
 {
 	int rc, r, i, j = 1;
 	struct cxl_irq_name *irq_name;
+	int alloc_count;
+
+	/*
+	 * In native mode, range 0 is reserved for the multiplexed
+	 * PSL interrupt. It was allocated when the AFU was initialized.
+	 *
+	 * In a guest, the PSL interrupt is not multiplexed, but per-context,
+	 * and is the first interrupt from range 0. It still needs to be
+	 * allocated, so bump the count by one.
+	 */
+	if (cpu_has_feature(CPU_FTR_HVMODE))
+		alloc_count = count;
+	else
+		alloc_count = count + 1;
 
 	/* Initialize the list head to hold irq names */
 	INIT_LIST_HEAD(&ctx->irq_names);
 
-	if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
+	if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
+							alloc_count)))
 		return rc;
 
-	/* Multiplexed PSL Interrupt */
-	ctx->irqs.offset[0] = ctx->afu->psl_hwirq;
-	ctx->irqs.range[0] = 1;
+	if (cpu_has_feature(CPU_FTR_HVMODE)) {
+		/* Multiplexed PSL Interrupt */
+		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
+		ctx->irqs.range[0] = 1;
+	}
 
 	ctx->irq_count = count;
 	ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
@@ -450,7 +284,7 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
 	 * Allocate names first. If any fail, bail out before allocating
 	 * actual hardware IRQs.
 	 */
-	for (r = 1; r < CXL_IRQ_RANGES; r++) {
+	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
 		for (i = 0; i < ctx->irqs.range[r]; i++) {
 			irq_name = kmalloc(sizeof(struct cxl_irq_name),
 					   GFP_KERNEL);
@@ -471,7 +305,7 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
 	return 0;
 
 out:
-	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
 	afu_irq_name_free(ctx);
 	return -ENOMEM;
 }
@@ -480,15 +314,30 @@ static void afu_register_hwirqs(struct cxl_context *ctx)
 {
 	irq_hw_number_t hwirq;
 	struct cxl_irq_name *irq_name;
-	int r,i;
+	int r, i;
+	irqreturn_t (*handler)(int irq, void *data);
 
 	/* We've allocated all memory now, so let's do the irq allocations */
 	irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
-	for (r = 1; r < CXL_IRQ_RANGES; r++) {
+	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
 		hwirq = ctx->irqs.offset[r];
 		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
-			cxl_map_irq(ctx->afu->adapter, hwirq,
-				    cxl_irq_afu, ctx, irq_name->name);
+			if (r == 0 && i == 0)
+				/*
+				 * The very first interrupt of range 0 is
+				 * always the PSL interrupt, but we only
+				 * need to connect a handler for guests,
+				 * because there's one PSL interrupt per
+				 * context.
+				 * On bare-metal, the PSL interrupt is
+				 * multiplexed and was set up when the AFU
+				 * was configured.
+ */ + handler = cxl_ops->psl_interrupt; + else + handler = cxl_irq_afu; + cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx, + irq_name->name); irq_name = list_next_entry(irq_name, list); } } @@ -504,7 +353,7 @@ int afu_register_irqs(struct cxl_context *ctx, u32 count) afu_register_hwirqs(ctx); return 0; - } +} void afu_release_irqs(struct cxl_context *ctx, void *cookie) { @@ -512,7 +361,7 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie) unsigned int virq; int r, i; - for (r = 1; r < CXL_IRQ_RANGES; r++) { + for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) { hwirq = ctx->irqs.offset[r]; for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { virq = irq_find_mapping(NULL, hwirq); @@ -522,7 +371,7 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie) } afu_irq_name_free(ctx); - cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); + cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter); ctx->irq_count = 0; } diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c index 9fde75ed4fac..ae68c3201156 100644 --- a/drivers/misc/cxl/main.c +++ b/drivers/misc/cxl/main.c @@ -32,6 +32,29 @@ uint cxl_verbose; module_param_named(verbose, cxl_verbose, uint, 0600); MODULE_PARM_DESC(verbose, "Enable verbose dmesg output"); +const struct cxl_backend_ops *cxl_ops; + +int cxl_afu_slbia(struct cxl_afu *afu) +{ + unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); + + pr_devel("cxl_afu_slbia issuing SLBIA command\n"); + cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL); + while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) { + if (time_after_eq(jiffies, timeout)) { + dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n"); + return -EBUSY; + } + /* If the adapter has gone down, we can assume that we + * will PERST it and that will invalidate everything. 
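The relocated cxl_afu_slbia() shows the polling idiom the driver uses wherever a hardware operation must complete: bound the wait with a deadline, bail out early if the link has died (a PERST will invalidate everything anyway), and relax the CPU between reads. A userspace model of the same loop, with time(2) standing in for jiffies and stub predicates standing in for the MMIO poll and cxl_ops->link_ok():

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define CXL_TIMEOUT 5	/* seconds, mirroring the driver's HZ-based budget */

static bool op_still_pending(void) { return false; }	/* stub MMIO poll */
static bool link_ok(void)          { return true;  }	/* stub link check */

static int wait_for_completion_polled(void)
{
	time_t deadline = time(NULL) + CXL_TIMEOUT;

	while (op_still_pending()) {
		if (time(NULL) >= deadline) {
			fprintf(stderr, "WARNING: operation timed out\n");
			return -1;	/* -EBUSY in the driver */
		}
		if (!link_ok())
			return -2;	/* -EIO: adapter gone, PERST cleans up */
		/* cpu_relax() in the kernel; nothing needed here */
	}
	return 0;
}

int main(void)
{
	printf("rc=%d\n", wait_for_completion_polled());
	return 0;
}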
+ */ + if (!cxl_ops->link_ok(afu->adapter, afu)) + return -EIO; + cpu_relax(); + } + return 0; +} + static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm) { struct task_struct *task; @@ -139,6 +162,32 @@ int cxl_alloc_sst(struct cxl_context *ctx) return 0; } +/* print buffer content as integers when debugging */ +void cxl_dump_debug_buffer(void *buf, size_t buf_len) +{ +#ifdef DEBUG + int i, *ptr; + + /* + * We want to regroup up to 4 integers per line, which means they + * need to be in the same pr_devel() statement + */ + ptr = (int *) buf; + for (i = 0; i * 4 < buf_len; i += 4) { + if ((i + 3) * 4 < buf_len) + pr_devel("%.8x %.8x %.8x %.8x\n", ptr[i], ptr[i + 1], + ptr[i + 2], ptr[i + 3]); + else if ((i + 2) * 4 < buf_len) + pr_devel("%.8x %.8x %.8x\n", ptr[i], ptr[i + 1], + ptr[i + 2]); + else if ((i + 1) * 4 < buf_len) + pr_devel("%.8x %.8x\n", ptr[i], ptr[i + 1]); + else + pr_devel("%.8x\n", ptr[i]); + } +#endif /* DEBUG */ +} + /* Find a CXL adapter by it's number and increase it's refcount */ struct cxl *get_cxl_adapter(int num) { @@ -152,7 +201,7 @@ struct cxl *get_cxl_adapter(int num) return adapter; } -int cxl_alloc_adapter_nr(struct cxl *adapter) +static int cxl_alloc_adapter_nr(struct cxl *adapter) { int i; @@ -174,13 +223,58 @@ void cxl_remove_adapter_nr(struct cxl *adapter) idr_remove(&cxl_adapter_idr, adapter->adapter_num); } +struct cxl *cxl_alloc_adapter(void) +{ + struct cxl *adapter; + + if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL))) + return NULL; + + spin_lock_init(&adapter->afu_list_lock); + + if (cxl_alloc_adapter_nr(adapter)) + goto err1; + + if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)) + goto err2; + + return adapter; + +err2: + cxl_remove_adapter_nr(adapter); +err1: + kfree(adapter); + return NULL; +} + +struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice) +{ + struct cxl_afu *afu; + + if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL))) + return NULL; + + afu->adapter = adapter; + afu->dev.parent = &adapter->dev; + afu->dev.release = cxl_ops->release_afu; + afu->slice = slice; + idr_init(&afu->contexts_idr); + mutex_init(&afu->contexts_lock); + spin_lock_init(&afu->afu_cntl_lock); + + afu->prefault_mode = CXL_PREFAULT_NONE; + afu->irqs_max = afu->adapter->user_irqs; + + return afu; +} + int cxl_afu_select_best_mode(struct cxl_afu *afu) { if (afu->modes_supported & CXL_MODE_DIRECTED) - return cxl_afu_activate_mode(afu, CXL_MODE_DIRECTED); + return cxl_ops->afu_activate_mode(afu, CXL_MODE_DIRECTED); if (afu->modes_supported & CXL_MODE_DEDICATED) - return cxl_afu_activate_mode(afu, CXL_MODE_DEDICATED); + return cxl_ops->afu_activate_mode(afu, CXL_MODE_DEDICATED); dev_warn(&afu->dev, "No supported programming modes available\n"); /* We don't fail this so the user can inspect sysfs */ @@ -191,9 +285,6 @@ static int __init init_cxl(void) { int rc = 0; - if (!cpu_has_feature(CPU_FTR_HVMODE)) - return -EPERM; - if ((rc = cxl_file_init())) return rc; @@ -202,7 +293,17 @@ static int __init init_cxl(void) if ((rc = register_cxl_calls(&cxl_calls))) goto err; - if ((rc = pci_register_driver(&cxl_pci_driver))) + if (cpu_has_feature(CPU_FTR_HVMODE)) { + cxl_ops = &cxl_native_ops; + rc = pci_register_driver(&cxl_pci_driver); + } +#ifdef CONFIG_PPC_PSERIES + else { + cxl_ops = &cxl_guest_ops; + rc = platform_driver_register(&cxl_of_driver); + } +#endif + if (rc) goto err1; return 0; @@ -217,7 +318,12 @@ err: static void exit_cxl(void) { - pci_unregister_driver(&cxl_pci_driver); + if 
(cpu_has_feature(CPU_FTR_HVMODE)) + pci_unregister_driver(&cxl_pci_driver); +#ifdef CONFIG_PPC_PSERIES + else + platform_driver_unregister(&cxl_of_driver); +#endif cxl_debugfs_exit(); cxl_file_exit(); diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index f40909793490..387fcbdf9793 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -42,7 +42,7 @@ static int afu_control(struct cxl_afu *afu, u64 command, goto out; } - if (!cxl_adapter_link_ok(afu->adapter)) { + if (!cxl_ops->link_ok(afu->adapter, afu)) { afu->enabled = enabled; rc = -EIO; goto out; @@ -80,7 +80,7 @@ int cxl_afu_disable(struct cxl_afu *afu) } /* This will disable as well as reset */ -int __cxl_afu_reset(struct cxl_afu *afu) +static int native_afu_reset(struct cxl_afu *afu) { pr_devel("AFU reset request\n"); @@ -90,9 +90,9 @@ int __cxl_afu_reset(struct cxl_afu *afu) false); } -int cxl_afu_check_and_enable(struct cxl_afu *afu) +static int native_afu_check_and_enable(struct cxl_afu *afu) { - if (!cxl_adapter_link_ok(afu->adapter)) { + if (!cxl_ops->link_ok(afu->adapter, afu)) { WARN(1, "Refusing to enable afu while link down!\n"); return -EIO; } @@ -114,7 +114,7 @@ int cxl_psl_purge(struct cxl_afu *afu) pr_devel("PSL purge request\n"); - if (!cxl_adapter_link_ok(afu->adapter)) { + if (!cxl_ops->link_ok(afu->adapter, afu)) { dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n"); rc = -EIO; goto out; @@ -136,7 +136,7 @@ int cxl_psl_purge(struct cxl_afu *afu) rc = -EBUSY; goto out; } - if (!cxl_adapter_link_ok(afu->adapter)) { + if (!cxl_ops->link_ok(afu->adapter, afu)) { rc = -EIO; goto out; } @@ -186,22 +186,22 @@ static int spa_max_procs(int spa_size) int cxl_alloc_spa(struct cxl_afu *afu) { /* Work out how many pages to allocate */ - afu->spa_order = 0; + afu->native->spa_order = 0; do { - afu->spa_order++; - afu->spa_size = (1 << afu->spa_order) * PAGE_SIZE; - afu->spa_max_procs = spa_max_procs(afu->spa_size); - } while (afu->spa_max_procs < afu->num_procs); + afu->native->spa_order++; + afu->native->spa_size = (1 << afu->native->spa_order) * PAGE_SIZE; + afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size); + } while (afu->native->spa_max_procs < afu->num_procs); - WARN_ON(afu->spa_size > 0x100000); /* Max size supported by the hardware */ + WARN_ON(afu->native->spa_size > 0x100000); /* Max size supported by the hardware */ - if (!(afu->spa = (struct cxl_process_element *) - __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->spa_order))) { + if (!(afu->native->spa = (struct cxl_process_element *) + __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) { pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n"); return -ENOMEM; } pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n", - 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs); + 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs); return 0; } @@ -210,13 +210,15 @@ static void attach_spa(struct cxl_afu *afu) { u64 spap; - afu->sw_command_status = (__be64 *)((char *)afu->spa + - ((afu->spa_max_procs + 3) * 128)); + afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa + + ((afu->native->spa_max_procs + 3) * 128)); - spap = virt_to_phys(afu->spa) & CXL_PSL_SPAP_Addr; - spap |= ((afu->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size; + spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr; + spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size; spap |= CXL_PSL_SPAP_V; - 
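The scheduled process area (SPA) sizing above doubles the allocation order until the area can describe num_procs processes; attach_spa() then packs the SPA's physical address and encoded size into the single CXL_PSL_SPAP_An register. A standalone model of the sizing loop — PAGE_SIZE and the procs-per-size rule here are simplified assumptions, since the real spa_max_procs() also accounts for header and command areas:

#include <stdio.h>

#define PAGE_SIZE 4096
#define PE_SIZE   128	/* assumed bytes per process element */

/* crude stand-in for the driver's spa_max_procs() */
static int spa_max_procs(size_t spa_size)
{
	return (int)(spa_size / PE_SIZE) - 4;	/* reserve a few slots */
}

int main(void)
{
	int num_procs = 1000, spa_order = 0, max_procs;
	size_t spa_size;

	do {
		spa_order++;
		spa_size = ((size_t)1 << spa_order) * PAGE_SIZE;
		max_procs = spa_max_procs(spa_size);
	} while (max_procs < num_procs);

	printf("order=%d size=%zu max_procs=%d\n",
	       spa_order, spa_size, max_procs);
	return 0;
}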
pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap); + pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", + afu->native->spa, afu->native->spa_max_procs, + afu->native->sw_command_status, spap); cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap); } @@ -227,9 +229,10 @@ static inline void detach_spa(struct cxl_afu *afu) void cxl_release_spa(struct cxl_afu *afu) { - if (afu->spa) { - free_pages((unsigned long) afu->spa, afu->spa_order); - afu->spa = NULL; + if (afu->native->spa) { + free_pages((unsigned long) afu->native->spa, + afu->native->spa_order); + afu->native->spa = NULL; } } @@ -247,7 +250,7 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter) dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n"); return -EBUSY; } - if (!cxl_adapter_link_ok(adapter)) + if (!cxl_ops->link_ok(adapter, NULL)) return -EIO; cpu_relax(); } @@ -258,28 +261,7 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter) dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n"); return -EBUSY; } - if (!cxl_adapter_link_ok(adapter)) - return -EIO; - cpu_relax(); - } - return 0; -} - -int cxl_afu_slbia(struct cxl_afu *afu) -{ - unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); - - pr_devel("cxl_afu_slbia issuing SLBIA command\n"); - cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL); - while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) { - if (time_after_eq(jiffies, timeout)) { - dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n"); - return -EBUSY; - } - /* If the adapter has gone down, we can assume that we - * will PERST it and that will invalidate everything. - */ - if (!cxl_adapter_link_ok(afu->adapter)) + if (!cxl_ops->link_ok(adapter, NULL)) return -EIO; cpu_relax(); } @@ -312,7 +294,7 @@ static void slb_invalid(struct cxl_context *ctx) struct cxl *adapter = ctx->afu->adapter; u64 slbia; - WARN_ON(!mutex_is_locked(&ctx->afu->spa_mutex)); + WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex)); cxl_p1_write(adapter, CXL_PSL_LBISEL, ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) | @@ -320,7 +302,7 @@ static void slb_invalid(struct cxl_context *ctx) cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID); while (1) { - if (!cxl_adapter_link_ok(adapter)) + if (!cxl_ops->link_ok(adapter, NULL)) break; slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA); if (!(slbia & CXL_TLB_SLB_P)) @@ -342,7 +324,7 @@ static int do_process_element_cmd(struct cxl_context *ctx, ctx->elem->software_state = cpu_to_be32(pe_state); smp_wmb(); - *(ctx->afu->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe); + *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe); smp_mb(); cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe); while (1) { @@ -351,12 +333,12 @@ static int do_process_element_cmd(struct cxl_context *ctx, rc = -EBUSY; goto out; } - if (!cxl_adapter_link_ok(ctx->afu->adapter)) { + if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n"); rc = -EIO; goto out; } - state = be64_to_cpup(ctx->afu->sw_command_status); + state = be64_to_cpup(ctx->afu->native->sw_command_status); if (state == ~0ULL) { pr_err("cxl: Error adding process element to AFU\n"); rc = -1; @@ -384,12 +366,12 @@ static int add_process_element(struct cxl_context *ctx) { int rc = 0; - mutex_lock(&ctx->afu->spa_mutex); + 
mutex_lock(&ctx->afu->native->spa_mutex); pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe); if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V))) ctx->pe_inserted = true; pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe); - mutex_unlock(&ctx->afu->spa_mutex); + mutex_unlock(&ctx->afu->native->spa_mutex); return rc; } @@ -401,18 +383,18 @@ static int terminate_process_element(struct cxl_context *ctx) if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V))) return rc; - mutex_lock(&ctx->afu->spa_mutex); + mutex_lock(&ctx->afu->native->spa_mutex); pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe); /* We could be asked to terminate when the hw is down. That * should always succeed: it's not running if the hw has gone * away and is being reset. */ - if (cxl_adapter_link_ok(ctx->afu->adapter)) + if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE, CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T); ctx->elem->software_state = 0; /* Remove Valid bit */ pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe); - mutex_unlock(&ctx->afu->spa_mutex); + mutex_unlock(&ctx->afu->native->spa_mutex); return rc; } @@ -420,20 +402,20 @@ static int remove_process_element(struct cxl_context *ctx) { int rc = 0; - mutex_lock(&ctx->afu->spa_mutex); + mutex_lock(&ctx->afu->native->spa_mutex); pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe); /* We could be asked to remove when the hw is down. Again, if * the hw is down, the PE is gone, so we succeed. */ - if (cxl_adapter_link_ok(ctx->afu->adapter)) + if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0); if (!rc) ctx->pe_inserted = false; slb_invalid(ctx); pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe); - mutex_unlock(&ctx->afu->spa_mutex); + mutex_unlock(&ctx->afu->native->spa_mutex); return rc; } @@ -446,7 +428,7 @@ void cxl_assign_psn_space(struct cxl_context *ctx) ctx->psn_size = ctx->afu->adapter->ps_size; } else { ctx->psn_phys = ctx->afu->psn_phys + - (ctx->afu->pp_offset + ctx->afu->pp_size * ctx->pe); + (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe); ctx->psn_size = ctx->afu->pp_size; } } @@ -458,7 +440,7 @@ static int activate_afu_directed(struct cxl_afu *afu) dev_info(&afu->dev, "Activating AFU directed mode\n"); afu->num_procs = afu->max_procs_virtualised; - if (afu->spa == NULL) { + if (afu->native->spa == NULL) { if (cxl_alloc_spa(afu)) return -ENOMEM; } @@ -552,7 +534,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) ctx->elem->common.wed = cpu_to_be64(wed); /* first guy needs to enable */ - if ((result = cxl_afu_check_and_enable(ctx->afu))) + if ((result = cxl_ops->afu_check_and_enable(ctx->afu))) return result; return add_process_element(ctx); @@ -568,7 +550,7 @@ static int deactivate_afu_directed(struct cxl_afu *afu) cxl_sysfs_afu_m_remove(afu); cxl_chardev_afu_remove(afu); - __cxl_afu_reset(afu); + cxl_ops->afu_reset(afu); cxl_afu_disable(afu); cxl_psl_purge(afu); @@ -632,7 +614,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr) /* master only context for dedicated */ cxl_assign_psn_space(ctx); - if ((rc = __cxl_afu_reset(afu))) + if ((rc = cxl_ops->afu_reset(afu))) return rc; cxl_p2n_write(afu, CXL_PSL_WED_An, wed); @@ -652,7 +634,7 @@ static int deactivate_dedicated_process(struct cxl_afu *afu) return 0; } -int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int 
mode) +static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode) { if (mode == CXL_MODE_DIRECTED) return deactivate_afu_directed(afu); @@ -661,19 +643,14 @@ int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode) return 0; } -int cxl_afu_deactivate_mode(struct cxl_afu *afu) -{ - return _cxl_afu_deactivate_mode(afu, afu->current_mode); -} - -int cxl_afu_activate_mode(struct cxl_afu *afu, int mode) +static int native_afu_activate_mode(struct cxl_afu *afu, int mode) { if (!mode) return 0; if (!(mode & afu->modes_supported)) return -EINVAL; - if (!cxl_adapter_link_ok(afu->adapter)) { + if (!cxl_ops->link_ok(afu->adapter, afu)) { WARN(1, "Device link is down, refusing to activate!\n"); return -EIO; } @@ -686,9 +663,10 @@ int cxl_afu_activate_mode(struct cxl_afu *afu, int mode) return -EINVAL; } -int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr) +static int native_attach_process(struct cxl_context *ctx, bool kernel, + u64 wed, u64 amr) { - if (!cxl_adapter_link_ok(ctx->afu->adapter)) { + if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { WARN(1, "Device link is down, refusing to attach process!\n"); return -EIO; } @@ -705,7 +683,7 @@ int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr) static inline int detach_process_native_dedicated(struct cxl_context *ctx) { - __cxl_afu_reset(ctx->afu); + cxl_ops->afu_reset(ctx->afu); cxl_afu_disable(ctx->afu); cxl_psl_purge(ctx->afu); return 0; @@ -723,7 +701,7 @@ static inline int detach_process_native_afu_directed(struct cxl_context *ctx) return 0; } -int cxl_detach_process(struct cxl_context *ctx) +static int native_detach_process(struct cxl_context *ctx) { trace_cxl_detach(ctx); @@ -733,14 +711,14 @@ int cxl_detach_process(struct cxl_context *ctx) return detach_process_native_afu_directed(ctx); } -int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info) +static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info) { u64 pidtid; /* If the adapter has gone away, we can't get any meaningful * information. 
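native_afu_activate_mode() above is a straight dispatch on the mode bit after two guard checks (mode supported, link up). Its shape is easy to see in isolation; a condensed model with the two CXL modes as bit flags, stubs in place of the real activate routines, and the link_ok() guard dropped for brevity:

#include <stdio.h>

#define CXL_MODE_DEDICATED 0x1
#define CXL_MODE_DIRECTED  0x2

struct afu { int modes_supported; int current_mode; };

static int activate_afu_directed(struct afu *a)
{
	a->current_mode = CXL_MODE_DIRECTED;
	return 0;
}

static int activate_dedicated_process(struct afu *a)
{
	a->current_mode = CXL_MODE_DEDICATED;
	return 0;
}

static int afu_activate_mode(struct afu *a, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & a->modes_supported))
		return -1;	/* -EINVAL in the driver */
	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(a);
	if (mode == CXL_MODE_DEDICATED)
		return activate_dedicated_process(a);
	return -1;
}

int main(void)
{
	struct afu a = { .modes_supported = CXL_MODE_DIRECTED };

	printf("directed: %d\n", afu_activate_mode(&a, CXL_MODE_DIRECTED));
	printf("dedicated: %d\n", afu_activate_mode(&a, CXL_MODE_DEDICATED));
	return 0;
}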
*/ - if (!cxl_adapter_link_ok(afu->adapter)) + if (!cxl_ops->link_ok(afu->adapter, afu)) return -EIO; info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); @@ -751,10 +729,214 @@ int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info) info->tid = pidtid & 0xffffffff; info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An); info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); + info->proc_handle = 0; + + return 0; +} + +static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx, + u64 dsisr, u64 errstat) +{ + u64 fir1, fir2, fir_slice, serr, afu_debug; + + fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1); + fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2); + fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An); + serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); + afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An); + + dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat); + dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1); + dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2); + dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); + dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); + dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); + + dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n"); + cxl_stop_trace(ctx->afu->adapter); + + return cxl_ops->ack_irq(ctx, 0, errstat); +} + +static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info) +{ + if (irq_info->dsisr & CXL_PSL_DSISR_TRANS) + cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); + else + cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); + + return IRQ_HANDLED; +} + +static irqreturn_t native_irq_multiplexed(int irq, void *data) +{ + struct cxl_afu *afu = data; + struct cxl_context *ctx; + struct cxl_irq_info irq_info; + int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff; + int ret; + + if ((ret = native_get_irq_info(afu, &irq_info))) { + WARN(1, "Unable to get CXL IRQ Info: %i\n", ret); + return fail_psl_irq(afu, &irq_info); + } + + rcu_read_lock(); + ctx = idr_find(&afu->contexts_idr, ph); + if (ctx) { + ret = cxl_irq(irq, ctx, &irq_info); + rcu_read_unlock(); + return ret; + } + rcu_read_unlock(); + + WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR" + " %016llx\n(Possible AFU HW issue - was a term/remove acked" + " with outstanding transactions?)\n", ph, irq_info.dsisr, + irq_info.dar); + return fail_psl_irq(afu, &irq_info); +} + +static irqreturn_t native_slice_irq_err(int irq, void *data) +{ + struct cxl_afu *afu = data; + u64 fir_slice, errstat, serr, afu_debug; + + WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq); + + serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); + fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An); + errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); + afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An); + dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); + dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); + dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat); + dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); + + cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); + + return IRQ_HANDLED; +} + +static irqreturn_t native_irq_err(int irq, void *data) +{ + struct cxl *adapter = data; + u64 fir1, fir2, err_ivte; + + WARN(1, "CXL ERROR interrupt %i\n", irq); + + err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE); + dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte); + + dev_crit(&adapter->dev, "STOPPING CXL TRACE\n"); 
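native_irq_multiplexed() below is where the bare-metal design pays off: one PSL interrupt serves every context on the AFU, and the handler recovers the target context by reading the process element handle from CXL_PSL_PEHandle_An and looking it up — idr_find() under rcu_read_lock() in the driver. A simplified model with a plain array standing in for the IDR:

#include <stdio.h>
#include <stddef.h>

#define MAX_PE 16

struct context { int pe; };

static struct context *contexts[MAX_PE];	/* stand-in for afu->contexts_idr */

/* demux one multiplexed PSL interrupt to its context, or fail it */
static int handle_multiplexed_irq(unsigned long pehandle_reg)
{
	int ph = (int)(pehandle_reg & 0xffff);	/* low 16 bits hold the PE */
	struct context *ctx = (ph < MAX_PE) ? contexts[ph] : NULL;

	if (!ctx) {
		/* driver: WARN about un-acked term/remove, then ack anyway */
		fprintf(stderr, "no context for PE %i\n", ph);
		return -1;
	}
	printf("delivering IRQ to context pe=%d\n", ctx->pe);
	return 0;
}

int main(void)
{
	static struct context c = { .pe = 3 };

	contexts[3] = &c;
	handle_multiplexed_irq(0xabcd0003);	/* upper bits ignored */
	handle_multiplexed_irq(0x0005);		/* stale handle */
	return 0;
}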
+ cxl_stop_trace(adapter); + + fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1); + fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2); + + dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2); + + return IRQ_HANDLED; +} + +int cxl_native_register_psl_err_irq(struct cxl *adapter) +{ + int rc; + + adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", + dev_name(&adapter->dev)); + if (!adapter->irq_name) + return -ENOMEM; + + if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter, + &adapter->native->err_hwirq, + &adapter->native->err_virq, + adapter->irq_name))) { + kfree(adapter->irq_name); + adapter->irq_name = NULL; + return rc; + } + + cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff); + + return 0; +} + +void cxl_native_release_psl_err_irq(struct cxl *adapter) +{ + if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq)) + return; + + cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); + cxl_unmap_irq(adapter->native->err_virq, adapter); + cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq); + kfree(adapter->irq_name); +} + +int cxl_native_register_serr_irq(struct cxl_afu *afu) +{ + u64 serr; + int rc; + + afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", + dev_name(&afu->dev)); + if (!afu->err_irq_name) + return -ENOMEM; + + if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu, + &afu->serr_hwirq, + &afu->serr_virq, afu->err_irq_name))) { + kfree(afu->err_irq_name); + afu->err_irq_name = NULL; + return rc; + } + + serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); + serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff); + cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); return 0; } +void cxl_native_release_serr_irq(struct cxl_afu *afu) +{ + if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) + return; + + cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); + cxl_unmap_irq(afu->serr_virq, afu); + cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); + kfree(afu->err_irq_name); +} + +int cxl_native_register_psl_irq(struct cxl_afu *afu) +{ + int rc; + + afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s", + dev_name(&afu->dev)); + if (!afu->psl_irq_name) + return -ENOMEM; + + if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed, + afu, &afu->native->psl_hwirq, &afu->native->psl_virq, + afu->psl_irq_name))) { + kfree(afu->psl_irq_name); + afu->psl_irq_name = NULL; + } + return rc; +} + +void cxl_native_release_psl_irq(struct cxl_afu *afu) +{ + if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq)) + return; + + cxl_unmap_irq(afu->native->psl_virq, afu); + cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq); + kfree(afu->psl_irq_name); +} + static void recover_psl_err(struct cxl_afu *afu, u64 errstat) { u64 dsisr; @@ -769,7 +951,7 @@ static void recover_psl_err(struct cxl_afu *afu, u64 errstat) cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat); } -int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) +static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) { trace_cxl_psl_irq_ack(ctx, tfc); if (tfc) @@ -784,3 +966,132 @@ int cxl_check_error(struct cxl_afu *afu) { return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL); } + +static bool native_support_attributes(const char *attr_name, + enum cxl_attrs type) +{ + return true; +} + +static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out) +{ + if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) + return 
-EIO; + if (unlikely(off >= afu->crs_len)) + return -ERANGE; + *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset + + (cr * afu->crs_len) + off); + return 0; +} + +static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out) +{ + if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) + return -EIO; + if (unlikely(off >= afu->crs_len)) + return -ERANGE; + *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset + + (cr * afu->crs_len) + off); + return 0; +} + +static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out) +{ + u64 aligned_off = off & ~0x3L; + u32 val; + int rc; + + rc = native_afu_cr_read32(afu, cr, aligned_off, &val); + if (!rc) + *out = (val >> ((off & 0x3) * 8)) & 0xffff; + return rc; +} + +static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out) +{ + u64 aligned_off = off & ~0x3L; + u32 val; + int rc; + + rc = native_afu_cr_read32(afu, cr, aligned_off, &val); + if (!rc) + *out = (val >> ((off & 0x3) * 8)) & 0xff; + return rc; +} + +static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in) +{ + if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) + return -EIO; + if (unlikely(off >= afu->crs_len)) + return -ERANGE; + out_le32(afu->native->afu_desc_mmio + afu->crs_offset + + (cr * afu->crs_len) + off, in); + return 0; +} + +static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in) +{ + u64 aligned_off = off & ~0x3L; + u32 val32, mask, shift; + int rc; + + rc = native_afu_cr_read32(afu, cr, aligned_off, &val32); + if (rc) + return rc; + shift = (off & 0x3) * 8; + WARN_ON(shift == 24); + mask = 0xffff << shift; + val32 = (val32 & ~mask) | (in << shift); + + rc = native_afu_cr_write32(afu, cr, aligned_off, val32); + return rc; +} + +static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) +{ + u64 aligned_off = off & ~0x3L; + u32 val32, mask, shift; + int rc; + + rc = native_afu_cr_read32(afu, cr, aligned_off, &val32); + if (rc) + return rc; + shift = (off & 0x3) * 8; + mask = 0xff << shift; + val32 = (val32 & ~mask) | (in << shift); + + rc = native_afu_cr_write32(afu, cr, aligned_off, val32); + return rc; +} + +const struct cxl_backend_ops cxl_native_ops = { + .module = THIS_MODULE, + .adapter_reset = cxl_pci_reset, + .alloc_one_irq = cxl_pci_alloc_one_irq, + .release_one_irq = cxl_pci_release_one_irq, + .alloc_irq_ranges = cxl_pci_alloc_irq_ranges, + .release_irq_ranges = cxl_pci_release_irq_ranges, + .setup_irq = cxl_pci_setup_irq, + .handle_psl_slice_error = native_handle_psl_slice_error, + .psl_interrupt = NULL, + .ack_irq = native_ack_irq, + .attach_process = native_attach_process, + .detach_process = native_detach_process, + .support_attributes = native_support_attributes, + .link_ok = cxl_adapter_link_ok, + .release_afu = cxl_pci_release_afu, + .afu_read_err_buffer = cxl_pci_afu_read_err_buffer, + .afu_check_and_enable = native_afu_check_and_enable, + .afu_activate_mode = native_afu_activate_mode, + .afu_deactivate_mode = native_afu_deactivate_mode, + .afu_reset = native_afu_reset, + .afu_cr_read8 = native_afu_cr_read8, + .afu_cr_read16 = native_afu_cr_read16, + .afu_cr_read32 = native_afu_cr_read32, + .afu_cr_read64 = native_afu_cr_read64, + .afu_cr_write8 = native_afu_cr_write8, + .afu_cr_write16 = native_afu_cr_write16, + .afu_cr_write32 = native_afu_cr_write32, + .read_adapter_vpd = cxl_pci_read_adapter_vpd, +}; diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c new file mode 100644 index 000000000000..edc458395f68 --- /dev/null 
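The 8- and 16-bit configuration record accessors above never touch the MMIO space at sub-word granularity: reads round the offset down to a 4-byte boundary and shift the wanted bits out, and writes do a read-modify-write of the containing 32-bit word. The same pattern in plain C over an in-memory buffer, with the cxl register plumbing stripped out:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t read32(const uint8_t *cfg, uint64_t off)
{
	uint32_t v;

	memcpy(&v, cfg + off, 4);	/* assumes little-endian data, as in the AFU records */
	return v;
}

static void write32(uint8_t *cfg, uint64_t off, uint32_t v)
{
	memcpy(cfg + off, &v, 4);
}

static uint8_t read8(const uint8_t *cfg, uint64_t off)
{
	uint64_t aligned = off & ~0x3ULL;

	return (read32(cfg, aligned) >> ((off & 0x3) * 8)) & 0xff;
}

static void write16(uint8_t *cfg, uint64_t off, uint16_t in)
{
	uint64_t aligned = off & ~0x3ULL;
	uint32_t shift = (uint32_t)(off & 0x3) * 8;	/* must not be 24: would straddle words */
	uint32_t mask = 0xffffu << shift;
	uint32_t v = read32(cfg, aligned);

	write32(cfg, aligned, (v & ~mask) | ((uint32_t)in << shift));
}

int main(void)
{
	uint8_t cfg[8] = { 0 };

	write16(cfg, 2, 0xbeef);
	printf("byte at 3: %#x\n", read8(cfg, 3));	/* 0xbe */
	return 0;
}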
+++ b/drivers/misc/cxl/of.c @@ -0,0 +1,513 @@ +/* + * Copyright 2015 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> + +#include "cxl.h" + + +static const __be32 *read_prop_string(const struct device_node *np, + const char *prop_name) +{ + const __be32 *prop; + + prop = of_get_property(np, prop_name, NULL); + if (cxl_verbose && prop) + pr_info("%s: %s\n", prop_name, (char *) prop); + return prop; +} + +static const __be32 *read_prop_dword(const struct device_node *np, + const char *prop_name, u32 *val) +{ + const __be32 *prop; + + prop = of_get_property(np, prop_name, NULL); + if (prop) + *val = be32_to_cpu(prop[0]); + if (cxl_verbose && prop) + pr_info("%s: %#x (%u)\n", prop_name, *val, *val); + return prop; +} + +static const __be64 *read_prop64_dword(const struct device_node *np, + const char *prop_name, u64 *val) +{ + const __be64 *prop; + + prop = of_get_property(np, prop_name, NULL); + if (prop) + *val = be64_to_cpu(prop[0]); + if (cxl_verbose && prop) + pr_info("%s: %#llx (%llu)\n", prop_name, *val, *val); + return prop; +} + + +static int read_handle(struct device_node *np, u64 *handle) +{ + const __be32 *prop; + u64 size; + + /* Get address and size of the node */ + prop = of_get_address(np, 0, &size, NULL); + if (size) + return -EINVAL; + + /* Helper to read a big number; size is in cells (not bytes) */ + *handle = of_read_number(prop, of_n_addr_cells(np)); + return 0; +} + +static int read_phys_addr(struct device_node *np, char *prop_name, + struct cxl_afu *afu) +{ + int i, len, entry_size, naddr, nsize, type; + u64 addr, size; + const __be32 *prop; + + naddr = of_n_addr_cells(np); + nsize = of_n_size_cells(np); + + prop = of_get_property(np, prop_name, &len); + if (prop) { + entry_size = naddr + nsize; + for (i = 0; i < (len / 4); i += entry_size, prop += entry_size) { + type = be32_to_cpu(prop[0]); + addr = of_read_number(prop, naddr); + size = of_read_number(&prop[naddr], nsize); + switch (type) { + case 0: /* unit address */ + afu->guest->handle = addr; + break; + case 1: /* p2 area */ + afu->guest->p2n_phys += addr; + afu->guest->p2n_size = size; + break; + case 2: /* problem state area */ + afu->psn_phys += addr; + afu->adapter->ps_size = size; + break; + default: + pr_err("Invalid address type %d found in %s property of AFU\n", + type, prop_name); + return -EINVAL; + } + if (cxl_verbose) + pr_info("%s: %#x %#llx (size %#llx)\n", + prop_name, type, addr, size); + } + } + return 0; +} + +static int read_vpd(struct cxl *adapter, struct cxl_afu *afu) +{ + char vpd[256]; + int rc; + size_t len = sizeof(vpd); + + memset(vpd, 0, len); + + if (adapter) + rc = cxl_guest_read_adapter_vpd(adapter, vpd, len); + else + rc = cxl_guest_read_afu_vpd(afu, vpd, len); + + if (rc > 0) { + cxl_dump_debug_buffer(vpd, rc); + rc = 0; + } + return rc; +} + +int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np) +{ + if (read_handle(afu_np, &afu->guest->handle)) + return -EINVAL; + pr_devel("AFU handle: 0x%.16llx\n", afu->guest->handle); + + return 0; +} + +int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np) +{ + int i, len, rc; + char *p; + const __be32 
*prop; + u16 device_id, vendor_id; + u32 val = 0, class_code; + + /* Properties are read in the same order as listed in PAPR */ + + if (cxl_verbose) { + pr_info("Dump of the 'ibm,coherent-platform-function' node properties:\n"); + + prop = of_get_property(np, "compatible", &len); + i = 0; + while (i < len) { + p = (char *) prop + i; + pr_info("compatible: %s\n", p); + i += strlen(p) + 1; + } + read_prop_string(np, "name"); + } + + rc = read_phys_addr(np, "reg", afu); + if (rc) + return rc; + + rc = read_phys_addr(np, "assigned-addresses", afu); + if (rc) + return rc; + + if (afu->psn_phys == 0) + afu->psa = false; + else + afu->psa = true; + + if (cxl_verbose) { + read_prop_string(np, "ibm,loc-code"); + read_prop_string(np, "device_type"); + } + + read_prop_dword(np, "ibm,#processes", &afu->max_procs_virtualised); + + if (cxl_verbose) { + read_prop_dword(np, "ibm,scratchpad-size", &val); + read_prop_dword(np, "ibm,programmable", &val); + read_prop_string(np, "ibm,phandle"); + read_vpd(NULL, afu); + } + + read_prop_dword(np, "ibm,max-ints-per-process", &afu->guest->max_ints); + afu->irqs_max = afu->guest->max_ints; + + prop = read_prop_dword(np, "ibm,min-ints-per-process", &afu->pp_irqs); + if (prop) { + /* One extra interrupt for the PSL interrupt is already + * included. Remove it now to keep only AFU interrupts and + * match the native case. + */ + afu->pp_irqs--; + } + + if (cxl_verbose) { + read_prop_dword(np, "ibm,max-ints", &val); + read_prop_dword(np, "ibm,vpd-size", &val); + } + + read_prop64_dword(np, "ibm,error-buffer-size", &afu->eb_len); + afu->eb_offset = 0; + + if (cxl_verbose) + read_prop_dword(np, "ibm,config-record-type", &val); + + read_prop64_dword(np, "ibm,config-record-size", &afu->crs_len); + afu->crs_offset = 0; + + read_prop_dword(np, "ibm,#config-records", &afu->crs_num); + + if (cxl_verbose) { + for (i = 0; i < afu->crs_num; i++) { + rc = cxl_ops->afu_cr_read16(afu, i, PCI_DEVICE_ID, + &device_id); + if (!rc) + pr_info("record %d - device-id: %#x\n", + i, device_id); + rc = cxl_ops->afu_cr_read16(afu, i, PCI_VENDOR_ID, + &vendor_id); + if (!rc) + pr_info("record %d - vendor-id: %#x\n", + i, vendor_id); + rc = cxl_ops->afu_cr_read32(afu, i, PCI_CLASS_REVISION, + &class_code); + if (!rc) { + class_code >>= 8; + pr_info("record %d - class-code: %#x\n", + i, class_code); + } + } + + read_prop_dword(np, "ibm,function-number", &val); + read_prop_dword(np, "ibm,privileged-function", &val); + read_prop_dword(np, "vendor-id", &val); + read_prop_dword(np, "device-id", &val); + read_prop_dword(np, "revision-id", &val); + read_prop_dword(np, "class-code", &val); + read_prop_dword(np, "subsystem-vendor-id", &val); + read_prop_dword(np, "subsystem-id", &val); + } + /* + * if "ibm,process-mmio" doesn't exist then per-process mmio is + * not supported + */ + val = 0; + prop = read_prop_dword(np, "ibm,process-mmio", &val); + if (prop && val == 1) + afu->pp_psa = true; + else + afu->pp_psa = false; + + if (cxl_verbose) { + read_prop_dword(np, "ibm,supports-aur", &val); + read_prop_dword(np, "ibm,supports-csrp", &val); + read_prop_dword(np, "ibm,supports-prr", &val); + } + + prop = read_prop_dword(np, "ibm,function-error-interrupt", &val); + if (prop) + afu->serr_hwirq = val; + + pr_devel("AFU handle: %#llx\n", afu->guest->handle); + pr_devel("p2n_phys: %#llx (size %#llx)\n", + afu->guest->p2n_phys, afu->guest->p2n_size); + pr_devel("psn_phys: %#llx (size %#llx)\n", + afu->psn_phys, afu->adapter->ps_size); + pr_devel("Max number of processes virtualised=%i\n", + 
afu->max_procs_virtualised); + pr_devel("Per-process irqs min=%i, max=%i\n", afu->pp_irqs, + afu->irqs_max); + pr_devel("Slice error interrupt=%#lx\n", afu->serr_hwirq); + + return 0; +} + +static int read_adapter_irq_config(struct cxl *adapter, struct device_node *np) +{ + const __be32 *ranges; + int len, nranges, i; + struct irq_avail *cur; + + ranges = of_get_property(np, "interrupt-ranges", &len); + if (ranges == NULL || len < (2 * sizeof(int))) + return -EINVAL; + + /* + * encoded array of two cells per entry, each cell encoded as + * with encode-int + */ + nranges = len / (2 * sizeof(int)); + if (nranges == 0 || (nranges * 2 * sizeof(int)) != len) + return -EINVAL; + + adapter->guest->irq_avail = kzalloc(nranges * sizeof(struct irq_avail), + GFP_KERNEL); + if (adapter->guest->irq_avail == NULL) + return -ENOMEM; + + adapter->guest->irq_base_offset = be32_to_cpu(ranges[0]); + for (i = 0; i < nranges; i++) { + cur = &adapter->guest->irq_avail[i]; + cur->offset = be32_to_cpu(ranges[i * 2]); + cur->range = be32_to_cpu(ranges[i * 2 + 1]); + cur->bitmap = kcalloc(BITS_TO_LONGS(cur->range), + sizeof(*cur->bitmap), GFP_KERNEL); + if (cur->bitmap == NULL) + goto err; + if (cur->offset < adapter->guest->irq_base_offset) + adapter->guest->irq_base_offset = cur->offset; + if (cxl_verbose) + pr_info("available IRQ range: %#lx-%#lx (%lu)\n", + cur->offset, cur->offset + cur->range - 1, + cur->range); + } + adapter->guest->irq_nranges = nranges; + spin_lock_init(&adapter->guest->irq_alloc_lock); + + return 0; +err: + for (i--; i >= 0; i--) { + cur = &adapter->guest->irq_avail[i]; + kfree(cur->bitmap); + } + kfree(adapter->guest->irq_avail); + adapter->guest->irq_avail = NULL; + return -ENOMEM; +} + +int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np) +{ + if (read_handle(np, &adapter->guest->handle)) + return -EINVAL; + pr_devel("Adapter handle: 0x%.16llx\n", adapter->guest->handle); + + return 0; +} + +int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np) +{ + int rc, len, naddr, i; + char *p; + const __be32 *prop; + u32 val = 0; + + /* Properties are read in the same order as listed in PAPR */ + + naddr = of_n_addr_cells(np); + + if (cxl_verbose) { + pr_info("Dump of the 'ibm,coherent-platform-facility' node properties:\n"); + + read_prop_dword(np, "#address-cells", &val); + read_prop_dword(np, "#size-cells", &val); + + prop = of_get_property(np, "compatible", &len); + i = 0; + while (i < len) { + p = (char *) prop + i; + pr_info("compatible: %s\n", p); + i += strlen(p) + 1; + } + read_prop_string(np, "name"); + read_prop_string(np, "model"); + + prop = of_get_property(np, "reg", NULL); + if (prop) { + pr_info("reg: addr:%#llx size:%#x\n", + of_read_number(prop, naddr), + be32_to_cpu(prop[naddr])); + } + + read_prop_string(np, "ibm,loc-code"); + } + + if ((rc = read_adapter_irq_config(adapter, np))) + return rc; + + if (cxl_verbose) { + read_prop_string(np, "device_type"); + read_prop_string(np, "ibm,phandle"); + } + + prop = read_prop_dword(np, "ibm,caia-version", &val); + if (prop) { + adapter->caia_major = (val & 0xFF00) >> 8; + adapter->caia_minor = val & 0xFF; + } + + prop = read_prop_dword(np, "ibm,psl-revision", &val); + if (prop) + adapter->psl_rev = val; + + prop = read_prop_string(np, "status"); + if (prop) { + adapter->guest->status = kasprintf(GFP_KERNEL, "%s", (char *) prop); + if (adapter->guest->status == NULL) + return -ENOMEM; + } + + prop = read_prop_dword(np, "vendor-id", &val); + if (prop) + adapter->guest->vendor = val; + + 
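read_adapter_irq_config() above interprets "interrupt-ranges" as pairs of cells, each pair an (offset, count) range, and hangs a bitmap allocator off each one. A userspace model of just the parse step, taking big-endian cells the way the flattened device tree stores them; the bitmap allocation and the error unwind are omitted:

#include <stdint.h>
#include <stdio.h>

struct irq_avail { unsigned long offset, range; };

static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* parse len bytes of two-cells-per-entry property; returns nranges or -1 */
static int parse_interrupt_ranges(const uint8_t *prop, int len,
				  struct irq_avail *out, int max)
{
	int nranges = len / (2 * 4);
	int i;

	if (nranges == 0 || nranges * 2 * 4 != len || nranges > max)
		return -1;	/* -EINVAL in the driver */

	for (i = 0; i < nranges; i++) {
		out[i].offset = be32(prop + i * 8);
		out[i].range  = be32(prop + i * 8 + 4);
	}
	return nranges;
}

int main(void)
{
	/* two ranges: (0x10, 4) and (0x40, 16), as big-endian cells */
	uint8_t prop[] = { 0,0,0,0x10, 0,0,0,4, 0,0,0,0x40, 0,0,0,16 };
	struct irq_avail avail[4];
	int n = parse_interrupt_ranges(prop, sizeof(prop), avail, 4);

	for (int i = 0; i < n; i++)
		printf("range %d: %#lx-%#lx (%lu)\n", i, avail[i].offset,
		       avail[i].offset + avail[i].range - 1, avail[i].range);
	return 0;
}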
prop = read_prop_dword(np, "device-id", &val); + if (prop) + adapter->guest->device = val; + + if (cxl_verbose) { + read_prop_dword(np, "ibm,privileged-facility", &val); + read_prop_dword(np, "revision-id", &val); + read_prop_dword(np, "class-code", &val); + } + + prop = read_prop_dword(np, "subsystem-vendor-id", &val); + if (prop) + adapter->guest->subsystem_vendor = val; + + prop = read_prop_dword(np, "subsystem-id", &val); + if (prop) + adapter->guest->subsystem = val; + + if (cxl_verbose) + read_vpd(adapter, NULL); + + return 0; +} + +static int cxl_of_remove(struct platform_device *pdev) +{ + struct cxl *adapter; + int afu; + + adapter = dev_get_drvdata(&pdev->dev); + for (afu = 0; afu < adapter->slices; afu++) + cxl_guest_remove_afu(adapter->afu[afu]); + + cxl_guest_remove_adapter(adapter); + return 0; +} + +static void cxl_of_shutdown(struct platform_device *pdev) +{ + cxl_of_remove(pdev); +} + +int cxl_of_probe(struct platform_device *pdev) +{ + struct device_node *np = NULL; + struct device_node *afu_np = NULL; + struct cxl *adapter = NULL; + int ret; + int slice, slice_ok; + + pr_devel("in %s\n", __func__); + + np = pdev->dev.of_node; + if (np == NULL) + return -ENODEV; + + /* init adapter */ + adapter = cxl_guest_init_adapter(np, pdev); + if (IS_ERR(adapter)) { + dev_err(&pdev->dev, "guest_init_adapter failed: %li\n", PTR_ERR(adapter)); + return PTR_ERR(adapter); + } + + /* init afu */ + slice_ok = 0; + for (afu_np = NULL, slice = 0; (afu_np = of_get_next_child(np, afu_np)); slice++) { + if ((ret = cxl_guest_init_afu(adapter, slice, afu_np))) + dev_err(&pdev->dev, "AFU %i failed to initialise: %i\n", + slice, ret); + else + slice_ok++; + } + + if (slice_ok == 0) { + dev_info(&pdev->dev, "No active AFU"); + adapter->slices = 0; + } + + if (afu_np) + of_node_put(afu_np); + return 0; +} + +static const struct of_device_id cxl_of_match[] = { + { .compatible = "ibm,coherent-platform-facility",}, + {}, +}; +MODULE_DEVICE_TABLE(of, cxl_of_match); + +struct platform_driver cxl_of_driver = { + .driver = { + .name = "cxl_of", + .of_match_table = cxl_of_match, + .owner = THIS_MODULE + }, + .probe = cxl_of_probe, + .remove = cxl_of_remove, + .shutdown = cxl_of_shutdown, +}; diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index a89608334ed5..2844e975bf79 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -89,8 +89,8 @@ /* This works a little different than the p1/p2 register accesses to make it * easier to pull out individual fields */ -#define AFUD_READ(afu, off) in_be64(afu->afu_desc_mmio + off) -#define AFUD_READ_LE(afu, off) in_le64(afu->afu_desc_mmio + off) +#define AFUD_READ(afu, off) in_be64(afu->native->afu_desc_mmio + off) +#define AFUD_READ_LE(afu, off) in_le64(afu->native->afu_desc_mmio + off) #define EXTRACT_PPC_BIT(val, bit) (!!(val & PPC_BIT(bit))) #define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be)) @@ -115,24 +115,6 @@ #define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63) #define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48) -u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off) -{ - u64 aligned_off = off & ~0x3L; - u32 val; - - val = cxl_afu_cr_read32(afu, cr, aligned_off); - return (val >> ((off & 0x2) * 8)) & 0xffff; -} - -u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off) -{ - u64 aligned_off = off & ~0x3L; - u32 val; - - val = cxl_afu_cr_read32(afu, cr, aligned_off); - return (val >> ((off & 0x3) * 8)) & 0xff; -} - static const struct pci_device_id cxl_pci_tbl[] = { { 
PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), }, { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), }, @@ -432,8 +414,8 @@ static int init_implementation_afu_regs(struct cxl_afu *afu) return 0; } -int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq, - unsigned int virq) +int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq, + unsigned int virq) { struct pci_dev *dev = to_pci_dev(adapter->dev.parent); @@ -475,28 +457,30 @@ int cxl_update_image_control(struct cxl *adapter) return 0; } -int cxl_alloc_one_irq(struct cxl *adapter) +int cxl_pci_alloc_one_irq(struct cxl *adapter) { struct pci_dev *dev = to_pci_dev(adapter->dev.parent); return pnv_cxl_alloc_hwirqs(dev, 1); } -void cxl_release_one_irq(struct cxl *adapter, int hwirq) +void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq) { struct pci_dev *dev = to_pci_dev(adapter->dev.parent); return pnv_cxl_release_hwirqs(dev, hwirq, 1); } -int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num) +int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs, + struct cxl *adapter, unsigned int num) { struct pci_dev *dev = to_pci_dev(adapter->dev.parent); return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num); } -void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter) +void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs, + struct cxl *adapter) { struct pci_dev *dev = to_pci_dev(adapter->dev.parent); @@ -557,7 +541,7 @@ static int switch_card_to_cxl(struct pci_dev *dev) return 0; } -static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) +static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) { u64 p1n_base, p2n_base, afu_desc; const u64 p1n_size = 0x100; @@ -565,15 +549,15 @@ static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct p p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size); p2n_base = p2_base(dev) + (afu->slice * p2n_size); - afu->psn_phys = p2_base(dev) + (adapter->ps_off + (afu->slice * adapter->ps_size)); - afu_desc = p2_base(dev) + adapter->afu_desc_off + (afu->slice * adapter->afu_desc_size); + afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size)); + afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size); - if (!(afu->p1n_mmio = ioremap(p1n_base, p1n_size))) + if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size))) goto err; if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size))) goto err1; if (afu_desc) { - if (!(afu->afu_desc_mmio = ioremap(afu_desc, adapter->afu_desc_size))) + if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size))) goto err2; } @@ -581,62 +565,41 @@ static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct p err2: iounmap(afu->p2n_mmio); err1: - iounmap(afu->p1n_mmio); + iounmap(afu->native->p1n_mmio); err: dev_err(&afu->dev, "Error mapping AFU MMIO regions\n"); return -ENOMEM; } -static void cxl_unmap_slice_regs(struct cxl_afu *afu) +static void pci_unmap_slice_regs(struct cxl_afu *afu) { if (afu->p2n_mmio) { iounmap(afu->p2n_mmio); afu->p2n_mmio = NULL; } - if (afu->p1n_mmio) { - iounmap(afu->p1n_mmio); - afu->p1n_mmio = NULL; + if (afu->native->p1n_mmio) { + iounmap(afu->native->p1n_mmio); + afu->native->p1n_mmio = NULL; } - if (afu->afu_desc_mmio) { - iounmap(afu->afu_desc_mmio); - afu->afu_desc_mmio = NULL; + if (afu->native->afu_desc_mmio) { + iounmap(afu->native->afu_desc_mmio); + afu->native->afu_desc_mmio 
= NULL; } } -static void cxl_release_afu(struct device *dev) +void cxl_pci_release_afu(struct device *dev) { struct cxl_afu *afu = to_cxl_afu(dev); - pr_devel("cxl_release_afu\n"); + pr_devel("%s\n", __func__); idr_destroy(&afu->contexts_idr); cxl_release_spa(afu); + kfree(afu->native); kfree(afu); } -static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice) -{ - struct cxl_afu *afu; - - if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL))) - return NULL; - - afu->adapter = adapter; - afu->dev.parent = &adapter->dev; - afu->dev.release = cxl_release_afu; - afu->slice = slice; - idr_init(&afu->contexts_idr); - mutex_init(&afu->contexts_lock); - spin_lock_init(&afu->afu_cntl_lock); - mutex_init(&afu->spa_mutex); - - afu->prefault_mode = CXL_PREFAULT_NONE; - afu->irqs_max = afu->adapter->user_irqs; - - return afu; -} - /* Expects AFU struct to have recently been zeroed out */ static int cxl_read_afu_descriptor(struct cxl_afu *afu) { @@ -658,7 +621,7 @@ static int cxl_read_afu_descriptor(struct cxl_afu *afu) afu->pp_size = AFUD_PPPSA_LEN(val) * 4096; afu->psa = AFUD_PPPSA_PSA(val); if ((afu->pp_psa = AFUD_PPPSA_PP(val))) - afu->pp_offset = AFUD_READ_PPPSA_OFF(afu); + afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu); val = AFUD_READ_CR(afu); afu->crs_len = AFUD_CR_LEN(val) * 256; @@ -685,10 +648,11 @@ static int cxl_read_afu_descriptor(struct cxl_afu *afu) static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu) { - int i; + int i, rc; + u32 val; if (afu->psa && afu->adapter->ps_size < - (afu->pp_offset + afu->pp_size*afu->max_procs_virtualised)) { + (afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) { dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n"); return -ENODEV; } @@ -697,7 +661,8 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu) dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!"); for (i = 0; i < afu->crs_num; i++) { - if ((cxl_afu_cr_read32(afu, i, 0) == 0)) { + rc = cxl_ops->afu_cr_read32(afu, i, 0, &val); + if (rc || val == 0) { dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i); return -EINVAL; } @@ -718,7 +683,7 @@ static int sanitise_afu_regs(struct cxl_afu *afu) reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An); if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) { dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg); - if (__cxl_afu_reset(afu)) + if (cxl_ops->afu_reset(afu)) return -EIO; if (cxl_afu_disable(afu)) return -EIO; @@ -766,13 +731,13 @@ static int sanitise_afu_regs(struct cxl_afu *afu) * 4/8 bytes aligned access. So in case the requested offset/count arent 8 byte * aligned the function uses a bounce buffer which can be max PAGE_SIZE. 
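The error-buffer comment above states the constraint the following hunk implements: the hardware only tolerates naturally aligned 4/8-byte MMIO reads, so an arbitrary (off, count) request is widened to an aligned window, copied into a bounce buffer, and the caller's slice is memcpy'd out of it. The alignment arithmetic on its own, with ordinary memory standing in for the MMIO error buffer:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* round down/up to an 8-byte boundary */
#define ALIGN_DOWN8(x) ((x) & ~7UL)
#define ALIGN_UP8(x)   (((x) + 7UL) & ~7UL)

static long read_window(const unsigned char *ebuf, size_t eb_len,
			char *dst, unsigned long off, size_t count)
{
	unsigned long start, end, len;
	unsigned char *tbuf;

	if (count == 0 || off >= eb_len)
		return 0;
	if (count > eb_len - off)
		count = eb_len - off;	/* clamp like the driver */

	start = ALIGN_DOWN8(off);
	end = ALIGN_UP8(off + count);
	len = end - start;

	tbuf = malloc(len);
	if (!tbuf)
		return -1;
	memcpy(tbuf, ebuf + start, len);		/* aligned window copy */
	memcpy(dst, tbuf + (off - start), count);	/* caller's slice */
	free(tbuf);
	return (long)count;
}

int main(void)
{
	unsigned char ebuf[32];
	char out[5];

	for (int i = 0; i < 32; i++)
		ebuf[i] = (unsigned char)i;
	printf("read %ld bytes, first=%d\n",
	       read_window(ebuf, sizeof(ebuf), out, 3, 5), out[0]);
	return 0;
}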
*/ -ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf, +ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, loff_t off, size_t count) { loff_t aligned_start, aligned_end; size_t aligned_length; void *tbuf; - const void __iomem *ebuf = afu->afu_desc_mmio + afu->eb_offset; + const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset; if (count == 0 || off < 0 || (size_t)off >= afu->eb_len) return 0; @@ -803,18 +768,18 @@ ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf, return count; } -static int cxl_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) +static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) { int rc; - if ((rc = cxl_map_slice_regs(afu, adapter, dev))) + if ((rc = pci_map_slice_regs(afu, adapter, dev))) return rc; if ((rc = sanitise_afu_regs(afu))) goto err1; /* We need to reset the AFU before we can read the AFU descriptor */ - if ((rc = __cxl_afu_reset(afu))) + if ((rc = cxl_ops->afu_reset(afu))) goto err1; if (cxl_verbose) @@ -829,44 +794,50 @@ static int cxl_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc if ((rc = init_implementation_afu_regs(afu))) goto err1; - if ((rc = cxl_register_serr_irq(afu))) + if ((rc = cxl_native_register_serr_irq(afu))) goto err1; - if ((rc = cxl_register_psl_irq(afu))) + if ((rc = cxl_native_register_psl_irq(afu))) goto err2; return 0; err2: - cxl_release_serr_irq(afu); + cxl_native_release_serr_irq(afu); err1: - cxl_unmap_slice_regs(afu); + pci_unmap_slice_regs(afu); return rc; } -static void cxl_deconfigure_afu(struct cxl_afu *afu) +static void pci_deconfigure_afu(struct cxl_afu *afu) { - cxl_release_psl_irq(afu); - cxl_release_serr_irq(afu); - cxl_unmap_slice_regs(afu); + cxl_native_release_psl_irq(afu); + cxl_native_release_serr_irq(afu); + pci_unmap_slice_regs(afu); } -static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) +static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) { struct cxl_afu *afu; - int rc; + int rc = -ENOMEM; afu = cxl_alloc_afu(adapter, slice); if (!afu) return -ENOMEM; + afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL); + if (!afu->native) + goto err_free_afu; + + mutex_init(&afu->native->spa_mutex); + rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice); if (rc) - goto err_free; + goto err_free_native; - rc = cxl_configure_afu(afu, adapter, dev); + rc = pci_configure_afu(afu, adapter, dev); if (rc) - goto err_free; + goto err_free_native; /* Don't care if this fails */ cxl_debugfs_afu_add(afu); @@ -889,24 +860,27 @@ static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) return 0; err_put1: - cxl_deconfigure_afu(afu); + pci_deconfigure_afu(afu); cxl_debugfs_afu_remove(afu); device_unregister(&afu->dev); return rc; -err_free: +err_free_native: + kfree(afu->native); +err_free_afu: kfree(afu); return rc; } -static void cxl_remove_afu(struct cxl_afu *afu) +static void cxl_pci_remove_afu(struct cxl_afu *afu) { - pr_devel("cxl_remove_afu\n"); + pr_devel("%s\n", __func__); if (!afu) return; + cxl_pci_vphb_remove(afu); cxl_sysfs_afu_remove(afu); cxl_debugfs_afu_remove(afu); @@ -915,13 +889,13 @@ static void cxl_remove_afu(struct cxl_afu *afu) spin_unlock(&afu->adapter->afu_list_lock); cxl_context_detach_all(afu); - cxl_afu_deactivate_mode(afu); + cxl_ops->afu_deactivate_mode(afu, afu->current_mode); - cxl_deconfigure_afu(afu); + pci_deconfigure_afu(afu); device_unregister(&afu->dev); } -int 
cxl_reset(struct cxl *adapter)
+int cxl_pci_reset(struct cxl *adapter)
 {
 	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
 	int rc;
@@ -955,17 +929,17 @@ static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
 	pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
 			p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));
 
-	if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
+	if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
 		goto err3;
 
-	if (!(adapter->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
+	if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
 		goto err4;
 
 	return 0;
 
 err4:
-	iounmap(adapter->p1_mmio);
-	adapter->p1_mmio = NULL;
+	iounmap(adapter->native->p1_mmio);
+	adapter->native->p1_mmio = NULL;
 err3:
 	pci_release_region(dev, 0);
 err2:
@@ -976,14 +950,14 @@ err1:
 
 static void cxl_unmap_adapter_regs(struct cxl *adapter)
 {
-	if (adapter->p1_mmio) {
-		iounmap(adapter->p1_mmio);
-		adapter->p1_mmio = NULL;
+	if (adapter->native->p1_mmio) {
+		iounmap(adapter->native->p1_mmio);
+		adapter->native->p1_mmio = NULL;
 		pci_release_region(to_pci_dev(adapter->dev.parent), 2);
 	}
-	if (adapter->p2_mmio) {
-		iounmap(adapter->p2_mmio);
-		adapter->p2_mmio = NULL;
+	if (adapter->native->p2_mmio) {
+		iounmap(adapter->native->p2_mmio);
+		adapter->native->p2_mmio = NULL;
 		pci_release_region(to_pci_dev(adapter->dev.parent), 0);
 	}
 }
@@ -1024,10 +998,10 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
 	/* Convert everything to bytes, because there is NO WAY I'd look at the
 	 * code a month later and forget what units these are in ;-) */
-	adapter->ps_off = ps_off * 64 * 1024;
+	adapter->native->ps_off = ps_off * 64 * 1024;
 	adapter->ps_size = ps_size * 64 * 1024;
-	adapter->afu_desc_off = afu_desc_off * 64 * 1024;
-	adapter->afu_desc_size = afu_desc_size *64 * 1024;
+	adapter->native->afu_desc_off = afu_desc_off * 64 * 1024;
+	adapter->native->afu_desc_size = afu_desc_size * 64 * 1024;
 
 	/* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
 	adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;
@@ -1078,21 +1052,26 @@ static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
 		return -EINVAL;
 	}
 
-	if (!adapter->afu_desc_off || !adapter->afu_desc_size) {
+	if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) {
 		dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
 		return -EINVAL;
 	}
 
-	if (adapter->ps_size > p2_size(dev) - adapter->ps_off) {
+	if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) {
 		dev_err(&dev->dev, "ABORTING: Problem state size larger than "
 			"available in BAR2: 0x%llx > 0x%llx\n",
-			adapter->ps_size, p2_size(dev) - adapter->ps_off);
+			adapter->ps_size, p2_size(dev) - adapter->native->ps_off);
 		return -EINVAL;
 	}
 
 	return 0;
 }
 
+ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
+{
+	return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf);
+}
+
 static void cxl_release_adapter(struct device *dev)
 {
 	struct cxl *adapter = to_cxl_adapter(dev);
@@ -1101,33 +1080,10 @@ static void cxl_release_adapter(struct device *dev)
 
 	cxl_remove_adapter_nr(adapter);
 
+	kfree(adapter->native);
 	kfree(adapter);
 }
 
-static struct cxl *cxl_alloc_adapter(void)
-{
-	struct cxl *adapter;
-
-	if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
-		return NULL;
-
-	spin_lock_init(&adapter->afu_list_lock);
-
-	if (cxl_alloc_adapter_nr(adapter))
-		goto err1;
-
-	if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
-		goto err2;
-
-	return adapter;
-
-err2:
-	cxl_remove_adapter_nr(adapter);
-err1:
-	kfree(adapter);
-	return NULL;
-}
-
 #define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))
 
 static int sanitise_adapter_regs(struct cxl *adapter)
@@ -1191,7 +1147,7 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
 	if ((rc = cxl_setup_psl_timebase(adapter, dev)))
 		goto err;
 
-	if ((rc = cxl_register_psl_err_irq(adapter)))
+	if ((rc = cxl_native_register_psl_err_irq(adapter)))
 		goto err;
 
 	return 0;
@@ -1206,13 +1162,13 @@ static void cxl_deconfigure_adapter(struct cxl *adapter)
 {
 	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
 
-	cxl_release_psl_err_irq(adapter);
+	cxl_native_release_psl_err_irq(adapter);
 	cxl_unmap_adapter_regs(adapter);
 
 	pci_disable_device(pdev);
 }
 
-static struct cxl *cxl_init_adapter(struct pci_dev *dev)
+static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
 {
 	struct cxl *adapter;
 	int rc;
@@ -1221,6 +1177,12 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
 	if (!adapter)
 		return ERR_PTR(-ENOMEM);
 
+	adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL);
+	if (!adapter->native) {
+		rc = -ENOMEM;
+		goto err_release;
+	}
+
 	/* Set defaults for parameters which need to persist over
 	 * configure/reconfigure
 	 */
@@ -1230,8 +1192,7 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
 	rc = cxl_configure_adapter(adapter, dev);
 	if (rc) {
 		pci_disable_device(dev);
-		cxl_release_adapter(&adapter->dev);
-		return ERR_PTR(rc);
+		goto err_release;
 	}
 
 	/* Don't care if this one fails: */
@@ -1257,9 +1218,13 @@ err_put1:
 	cxl_deconfigure_adapter(adapter);
 	device_unregister(&adapter->dev);
 	return ERR_PTR(rc);
+
+err_release:
+	cxl_release_adapter(&adapter->dev);
+	return ERR_PTR(rc);
 }
 
-static void cxl_remove_adapter(struct cxl *adapter)
+static void cxl_pci_remove_adapter(struct cxl *adapter)
 {
 	pr_devel("cxl_remove_adapter\n");
 
@@ -1277,17 +1242,22 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	int slice;
 	int rc;
 
+	if (cxl_pci_is_vphb_device(dev)) {
+		dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n");
+		return -ENODEV;
+	}
+
 	if (cxl_verbose)
 		dump_cxl_config_space(dev);
 
-	adapter = cxl_init_adapter(dev);
+	adapter = cxl_pci_init_adapter(dev);
 	if (IS_ERR(adapter)) {
 		dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
 		return PTR_ERR(adapter);
 	}
 
 	for (slice = 0; slice < adapter->slices; slice++) {
-		if ((rc = cxl_init_afu(adapter, slice, dev))) {
+		if ((rc = pci_init_afu(adapter, slice, dev))) {
 			dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
 			continue;
 		}
@@ -1312,10 +1282,9 @@ static void cxl_remove(struct pci_dev *dev)
 	 */
 	for (i = 0; i < adapter->slices; i++) {
 		afu = adapter->afu[i];
-		cxl_pci_vphb_remove(afu);
-		cxl_remove_afu(afu);
+		cxl_pci_remove_afu(afu);
 	}
-	cxl_remove_adapter(adapter);
+	cxl_pci_remove_adapter(adapter);
 }
 
 static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
@@ -1461,8 +1430,8 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
 			return result;
 
 		cxl_context_detach_all(afu);
-		cxl_afu_deactivate_mode(afu);
-		cxl_deconfigure_afu(afu);
+		cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
+		pci_deconfigure_afu(afu);
 	}
 	cxl_deconfigure_adapter(adapter);
@@ -1485,14 +1454,12 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
 	for (i = 0; i < adapter->slices; i++) {
 		afu = adapter->afu[i];
 
-		if (cxl_configure_afu(afu, adapter, pdev))
+		if (pci_configure_afu(afu, adapter, pdev))
 			goto err;
 
 		if (cxl_afu_select_best_mode(afu))
 			goto err;
 
-		cxl_pci_vphb_reconfigure(afu);
-
 		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
 			/* Reset the device context.
 			 * TODO: make this less disruptive
@@ -1508,7 +1475,7 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
 
 			afu_dev->dev.archdata.cxl_ctx = ctx;
 
-			if (cxl_afu_check_and_enable(afu))
+			if (cxl_ops->afu_check_and_enable(afu))
 				goto err;
 
 			afu_dev->error_state = pci_channel_io_normal;
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 038af5d45145..25913c08794c 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -69,7 +69,7 @@ static ssize_t reset_adapter_store(struct device *device,
 	if ((rc != 1) || (val != 1))
 		return -EINVAL;
 
-	if ((rc = cxl_reset(adapter)))
+	if ((rc = cxl_ops->adapter_reset(adapter)))
 		return rc;
 	return count;
 }
@@ -165,7 +165,7 @@ static ssize_t pp_mmio_off_show(struct device *device,
 {
 	struct cxl_afu *afu = to_afu_chardev_m(device);
 
-	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_offset);
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
 }
 
 static ssize_t pp_mmio_len_show(struct device *device,
@@ -211,7 +211,7 @@ static ssize_t reset_store_afu(struct device *device,
 		goto err;
 	}
 
-	if ((rc = __cxl_afu_reset(afu)))
+	if ((rc = cxl_ops->afu_reset(afu)))
 		goto err;
 
 	rc = count;
@@ -253,8 +253,14 @@ static ssize_t irqs_max_store(struct device *device,
 	if (irqs_max < afu->pp_irqs)
 		return -EINVAL;
 
-	if (irqs_max > afu->adapter->user_irqs)
-		return -EINVAL;
+	if (cpu_has_feature(CPU_FTR_HVMODE)) {
+		if (irqs_max > afu->adapter->user_irqs)
+			return -EINVAL;
+	} else {
+		/* pHyp sets a per-AFU limit */
+		if (irqs_max > afu->guest->max_ints)
+			return -EINVAL;
+	}
 
 	afu->irqs_max = irqs_max;
 	return count;
@@ -348,7 +354,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
 	}
 
 	/*
-	 * cxl_afu_deactivate_mode needs to be done outside the lock, prevent
+	 * afu_deactivate_mode needs to be done outside the lock, prevent
 	 * other contexts coming in before we are ready:
 	 */
 	old_mode = afu->current_mode;
@@ -357,9 +363,9 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
 
 	mutex_unlock(&afu->contexts_lock);
 
-	if ((rc = _cxl_afu_deactivate_mode(afu, old_mode)))
+	if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode)))
 		return rc;
-	if ((rc = cxl_afu_activate_mode(afu, mode)))
+	if ((rc = cxl_ops->afu_activate_mode(afu, mode)))
 		return rc;
 
 	return count;
@@ -388,7 +394,7 @@ static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
 {
 	struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));
 
-	return cxl_afu_read_err_buffer(afu, buf, off, count);
+	return cxl_ops->afu_read_err_buffer(afu, buf, off, count);
 }
 
 static struct device_attribute afu_attrs[] = {
@@ -405,24 +411,39 @@ static struct device_attribute afu_attrs[] = {
 
 int cxl_sysfs_adapter_add(struct cxl *adapter)
 {
+	struct device_attribute *dev_attr;
 	int i, rc;
 
 	for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
-		if ((rc = device_create_file(&adapter->dev, &adapter_attrs[i])))
-			goto err;
+		dev_attr = &adapter_attrs[i];
+		if (cxl_ops->support_attributes(dev_attr->attr.name,
+						CXL_ADAPTER_ATTRS)) {
+			if ((rc = device_create_file(&adapter->dev, dev_attr)))
+				goto err;
+		}
 	}
 	return 0;
 err:
-	for (i--; i >= 0; i--)
-		device_remove_file(&adapter->dev, &adapter_attrs[i]);
+	for (i--; i >= 0; i--) {
+		dev_attr = &adapter_attrs[i];
+		if (cxl_ops->support_attributes(dev_attr->attr.name,
+						CXL_ADAPTER_ATTRS))
+			device_remove_file(&adapter->dev, dev_attr);
+	}
 	return rc;
 }
+
 void cxl_sysfs_adapter_remove(struct cxl *adapter)
 {
+	struct device_attribute *dev_attr;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++)
-		device_remove_file(&adapter->dev, &adapter_attrs[i]);
+	for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
+		dev_attr = &adapter_attrs[i];
+		if (cxl_ops->support_attributes(dev_attr->attr.name,
+						CXL_ADAPTER_ATTRS))
+			device_remove_file(&adapter->dev, dev_attr);
+	}
 }
 
 struct afu_config_record {
@@ -468,10 +489,12 @@ static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
 	struct afu_config_record *cr = to_cr(kobj);
 	struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));
 
-	u64 i, j, val;
+	u64 i, j, val, rc;
 
 	for (i = 0; i < count;) {
-		val = cxl_afu_cr_read64(afu, cr->cr, off & ~0x7);
+		rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val);
+		if (rc)
+			val = ~0ULL;
 		for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
 			buf[i] = (val >> (j * 8)) & 0xff;
 	}
@@ -516,14 +539,22 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c
 		return ERR_PTR(-ENOMEM);
 
 	cr->cr = cr_idx;
-	cr->device = cxl_afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID);
-	cr->vendor = cxl_afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID);
-	cr->class = cxl_afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION) >> 8;
+
+	rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device);
+	if (rc)
+		goto err;
+	rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor);
+	if (rc)
+		goto err;
+	rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class);
+	if (rc)
+		goto err;
+	cr->class >>= 8;
 
 	/*
 	 * Export raw AFU PCIe like config record. For now this is read only by
 	 * root - we can expand that later to be readable by non-root and maybe
-	 * even writable provided we have a good use-case. Once we suport
+	 * even writable provided we have a good use-case. Once we support
 	 * exposing AFUs through a virtual PHB they will get that for free from
 	 * Linux' PCI infrastructure, but until then it's not clear that we
 	 * need it for anything since the main use case is just identifying
@@ -561,6 +592,7 @@ err:
 
 void cxl_sysfs_afu_remove(struct cxl_afu *afu)
 {
+	struct device_attribute *dev_attr;
 	struct afu_config_record *cr, *tmp;
 	int i;
 
@@ -568,8 +600,12 @@ void cxl_sysfs_afu_remove(struct cxl_afu *afu)
 	if (afu->eb_len)
 		device_remove_bin_file(&afu->dev, &afu->attr_eb);
 
-	for (i = 0; i < ARRAY_SIZE(afu_attrs); i++)
-		device_remove_file(&afu->dev, &afu_attrs[i]);
+	for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
+		dev_attr = &afu_attrs[i];
+		if (cxl_ops->support_attributes(dev_attr->attr.name,
+						CXL_AFU_ATTRS))
+			device_remove_file(&afu->dev, &afu_attrs[i]);
+	}
 
 	list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
 		sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
@@ -579,14 +615,19 @@ void cxl_sysfs_afu_remove(struct cxl_afu *afu)
 
 int cxl_sysfs_afu_add(struct cxl_afu *afu)
 {
+	struct device_attribute *dev_attr;
 	struct afu_config_record *cr;
 	int i, rc;
 
 	INIT_LIST_HEAD(&afu->crs);
 
 	for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
-		if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
-			goto err;
+		dev_attr = &afu_attrs[i];
+		if (cxl_ops->support_attributes(dev_attr->attr.name,
+						CXL_AFU_ATTRS)) {
+			if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
+				goto err;
+		}
 	}
 
 	/* conditionally create the add the binary file for error info buffer */
@@ -625,32 +666,50 @@ err:
 	/* reset the eb_len as we havent created the bin attr */
 	afu->eb_len = 0;
 
-	for (i--; i >= 0; i--)
+	for (i--; i >= 0; i--) {
+		dev_attr = &afu_attrs[i];
+		if (cxl_ops->support_attributes(dev_attr->attr.name,
+						CXL_AFU_ATTRS))
 		device_remove_file(&afu->dev, &afu_attrs[i]);
+	}
 
 	return rc;
 }
 
 int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
 {
+	struct device_attribute *dev_attr;
 	int i, rc;
 
 	for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
-		if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
-			goto err;
+		dev_attr = &afu_master_attrs[i];
+		if (cxl_ops->support_attributes(dev_attr->attr.name,
+						CXL_AFU_MASTER_ATTRS)) {
+			if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
+				goto err;
+		}
 	}
 
 	return 0;
 
 err:
-	for (i--; i >= 0; i--)
-		device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
+	for (i--; i >= 0; i--) {
+		dev_attr = &afu_master_attrs[i];
+		if (cxl_ops->support_attributes(dev_attr->attr.name,
+						CXL_AFU_MASTER_ATTRS))
+			device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
+	}
 	return rc;
 }
 
 void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
 {
+	struct device_attribute *dev_attr;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++)
-		device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
+	for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
+		dev_attr = &afu_master_attrs[i];
+		if (cxl_ops->support_attributes(dev_attr->attr.name,
+						CXL_AFU_MASTER_ATTRS))
+			device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
+	}
 }
diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h
index 6e1e2adfba8e..751d6119683e 100644
--- a/drivers/misc/cxl/trace.h
+++ b/drivers/misc/cxl/trace.h
@@ -450,6 +450,199 @@ DEFINE_EVENT(cxl_pe_class, cxl_slbia,
 	TP_ARGS(ctx)
 );
 
+TRACE_EVENT(cxl_hcall,
+	TP_PROTO(u64 unit_address, u64 process_token, long rc),
+
+	TP_ARGS(unit_address, process_token, rc),
+
+	TP_STRUCT__entry(
+		__field(u64, unit_address)
+		__field(u64, process_token)
+		__field(long, rc)
+	),
+
+	TP_fast_assign(
+		__entry->unit_address = unit_address;
+		__entry->process_token = process_token;
+		__entry->rc = rc;
+	),
+
+	TP_printk("unit_address=0x%016llx process_token=0x%016llx rc=%li",
+		__entry->unit_address,
+		__entry->process_token,
+		__entry->rc
+	)
+);
+
+TRACE_EVENT(cxl_hcall_control,
+	TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3,
+	u64 p4, unsigned long r4, long rc),
+
+	TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc),
+
+	TP_STRUCT__entry(
+		__field(u64, unit_address)
+		__field(char *, fct)
+		__field(u64, p1)
+		__field(u64, p2)
+		__field(u64, p3)
+		__field(u64, p4)
+		__field(unsigned long, r4)
+		__field(long, rc)
+	),
+
+	TP_fast_assign(
+		__entry->unit_address = unit_address;
+		__entry->fct = fct;
+		__entry->p1 = p1;
+		__entry->p2 = p2;
+		__entry->p3 = p3;
+		__entry->p4 = p4;
+		__entry->r4 = r4;
+		__entry->rc = rc;
+	),
+
+	TP_printk("unit_address=%#.16llx %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li",
+		__entry->unit_address,
+		__entry->fct,
+		__entry->p1,
+		__entry->p2,
+		__entry->p3,
+		__entry->p4,
+		__entry->r4,
+		__entry->rc
+	)
+);
+
+TRACE_EVENT(cxl_hcall_attach,
+	TP_PROTO(u64 unit_address, u64 phys_addr, unsigned long process_token,
+		unsigned long mmio_addr, unsigned long mmio_size, long rc),
+
+	TP_ARGS(unit_address, phys_addr, process_token,
+		mmio_addr, mmio_size, rc),
+
+	TP_STRUCT__entry(
+		__field(u64, unit_address)
+		__field(u64, phys_addr)
+		__field(unsigned long, process_token)
+		__field(unsigned long, mmio_addr)
+		__field(unsigned long, mmio_size)
+		__field(long, rc)
+	),
+
+	TP_fast_assign(
+		__entry->unit_address = unit_address;
+		__entry->phys_addr = phys_addr;
+		__entry->process_token = process_token;
+		__entry->mmio_addr = mmio_addr;
+		__entry->mmio_size = mmio_size;
+		__entry->rc = rc;
+	),
+
+	TP_printk("unit_address=0x%016llx phys_addr=0x%016llx "
+		"token=0x%.8lx mmio_addr=0x%lx mmio_size=0x%lx rc=%li",
+		__entry->unit_address,
+		__entry->phys_addr,
+		__entry->process_token,
+		__entry->mmio_addr,
+		__entry->mmio_size,
+		__entry->rc
+	)
+);
+
+DEFINE_EVENT(cxl_hcall, cxl_hcall_detach,
+	TP_PROTO(u64 unit_address, u64 process_token, long rc),
+	TP_ARGS(unit_address, process_token, rc)
+);
+
+DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_function,
+	TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3,
+	u64 p4, unsigned long r4, long rc),
+	TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc)
+);
+
+DEFINE_EVENT(cxl_hcall, cxl_hcall_collect_int_info,
+	TP_PROTO(u64 unit_address, u64 process_token, long rc),
+	TP_ARGS(unit_address, process_token, rc)
+);
+
+TRACE_EVENT(cxl_hcall_control_faults,
+	TP_PROTO(u64 unit_address, u64 process_token,
+		u64 control_mask, u64 reset_mask, unsigned long r4,
+		long rc),
+
+	TP_ARGS(unit_address, process_token,
+		control_mask, reset_mask, r4, rc),
+
+	TP_STRUCT__entry(
+		__field(u64, unit_address)
+		__field(u64, process_token)
+		__field(u64, control_mask)
+		__field(u64, reset_mask)
+		__field(unsigned long, r4)
+		__field(long, rc)
+	),
+
+	TP_fast_assign(
+		__entry->unit_address = unit_address;
+		__entry->process_token = process_token;
+		__entry->control_mask = control_mask;
+		__entry->reset_mask = reset_mask;
+		__entry->r4 = r4;
+		__entry->rc = rc;
+	),
+
+	TP_printk("unit_address=0x%016llx process_token=0x%llx "
+		"control_mask=%#llx reset_mask=%#llx r4=%#lx rc=%li",
+		__entry->unit_address,
+		__entry->process_token,
+		__entry->control_mask,
+		__entry->reset_mask,
+		__entry->r4,
+		__entry->rc
+	)
+);
+
+DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_facility,
+	TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3,
+	u64 p4, unsigned long r4, long rc),
+	TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc)
+);
+
+TRACE_EVENT(cxl_hcall_download_facility,
+	TP_PROTO(u64 unit_address, char *fct, u64 list_address, u64 num,
+		unsigned long r4, long rc),
+
+	TP_ARGS(unit_address, fct, list_address, num, r4, rc),
+
+	TP_STRUCT__entry(
+		__field(u64, unit_address)
+		__field(char *, fct)
+		__field(u64, list_address)
+		__field(u64, num)
+		__field(unsigned long, r4)
+		__field(long, rc)
+	),
+
+	TP_fast_assign(
+		__entry->unit_address = unit_address;
+		__entry->fct = fct;
+		__entry->list_address = list_address;
+		__entry->num = num;
+		__entry->r4 = r4;
+		__entry->rc = rc;
+	),
+
+	TP_printk("%#.16llx, %s(%#llx, %#llx), %#lx): %li",
+		__entry->unit_address,
+		__entry->fct,
+		__entry->list_address,
+		__entry->num,
+		__entry->r4,
+		__entry->rc
+	)
+);
+
 #endif /* _CXL_TRACE_H */
 
 /* This part must be outside protection */
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index cbd4331fb45c..cdc7723b845d 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -49,7 +49,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
 	phb = pci_bus_to_host(dev->bus);
 	afu = (struct cxl_afu *)phb->private_data;
 
-	if (!cxl_adapter_link_ok(afu->adapter)) {
+	if (!cxl_ops->link_ok(afu->adapter, afu)) {
 		dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
 		return false;
 	}
@@ -66,7 +66,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
 		return false;
 	dev->dev.archdata.cxl_ctx = ctx;
 
-	return (cxl_afu_check_and_enable(afu) == 0);
+	return (cxl_ops->afu_check_and_enable(afu) == 0);
 }
 
 static void cxl_pci_disable_device(struct pci_dev *dev)
@@ -99,113 +99,90 @@ static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
 	return (bus << 8) + devfn;
 }
 
-static unsigned long cxl_pcie_cfg_addr(struct pci_controller* phb,
-				       u8 bus, u8 devfn, int offset)
-{
-	int record = cxl_pcie_cfg_record(bus, devfn);
-
-	return (unsigned long)phb->cfg_addr + ((unsigned long)phb->cfg_data * record) + offset;
-}
-
-
 static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
-				int offset, int len,
-				volatile void __iomem **ioaddr,
-				u32 *mask, int *shift)
+				struct cxl_afu **_afu, int *_record)
 {
 	struct pci_controller *phb;
 	struct cxl_afu *afu;
-	unsigned long addr;
+	int record;
 
 	phb = pci_bus_to_host(bus);
 	if (phb == NULL)
 		return PCIBIOS_DEVICE_NOT_FOUND;
-	afu = (struct cxl_afu *)phb->private_data;
 
-	if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
+	afu = (struct cxl_afu *)phb->private_data;
+	record = cxl_pcie_cfg_record(bus->number, devfn);
+	if (record > afu->crs_num)
 		return PCIBIOS_DEVICE_NOT_FOUND;
-	if (offset >= (unsigned long)phb->cfg_data)
-		return PCIBIOS_BAD_REGISTER_NUMBER;
-	addr = cxl_pcie_cfg_addr(phb, bus->number, devfn, offset);
 
-	*ioaddr = (void *)(addr & ~0x3ULL);
-	*shift = ((addr & 0x3) * 8);
-	switch (len) {
-	case 1:
-		*mask = 0xff;
-		break;
-	case 2:
-		*mask = 0xffff;
-		break;
-	default:
-		*mask = 0xffffffff;
-		break;
-	}
+	*_afu = afu;
+	*_record = record;
 	return 0;
 }
-
-static inline bool cxl_config_link_ok(struct pci_bus *bus)
-{
-	struct pci_controller *phb;
-	struct cxl_afu *afu;
-
-	/* Config space IO is based on phb->cfg_addr, which is based on
-	 * afu_desc_mmio. This isn't safe to read/write when the link
-	 * goes down, as EEH tears down MMIO space.
-	 *
-	 * Check if the link is OK before proceeding.
-	 */
-
-	phb = pci_bus_to_host(bus);
-	if (phb == NULL)
-		return false;
-	afu = (struct cxl_afu *)phb->private_data;
-	return cxl_adapter_link_ok(afu->adapter);
-}
-
 static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
 				int offset, int len, u32 *val)
 {
-	volatile void __iomem *ioaddr;
-	int shift, rc;
-	u32 mask;
+	int rc, record;
+	struct cxl_afu *afu;
+	u8 val8;
+	u16 val16;
+	u32 val32;
 
-	rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
-				  &mask, &shift);
+	rc = cxl_pcie_config_info(bus, devfn, &afu, &record);
 	if (rc)
 		return rc;
 
-	if (!cxl_config_link_ok(bus))
+	switch (len) {
+	case 1:
+		rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
+		*val = val8;
+		break;
+	case 2:
+		rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
+		*val = val16;
+		break;
+	case 4:
+		rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
+		*val = val32;
+		break;
+	default:
+		WARN_ON(1);
+	}
+
+	if (rc)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
-	/* Can only read 32 bits */
-	*val = (in_le32(ioaddr) >> shift) & mask;
 	return PCIBIOS_SUCCESSFUL;
 }
 
 static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
 				 int offset, int len, u32 val)
 {
-	volatile void __iomem *ioaddr;
-	u32 v, mask;
-	int shift, rc;
+	int rc, record;
+	struct cxl_afu *afu;
 
-	rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
-				  &mask, &shift);
+	rc = cxl_pcie_config_info(bus, devfn, &afu, &record);
 	if (rc)
 		return rc;
 
-	if (!cxl_config_link_ok(bus))
-		return PCIBIOS_DEVICE_NOT_FOUND;
-
-	/* Can only write 32 bits so do read-modify-write */
-	mask <<= shift;
-	val <<= shift;
+	switch (len) {
+	case 1:
+		rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
+		break;
+	case 2:
+		rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
+		break;
+	case 4:
+		rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
+		break;
+	default:
+		WARN_ON(1);
+	}
 
-	v = (in_le32(ioaddr) & ~mask) | (val & mask);
+	if (rc)
+		return PCIBIOS_SET_FAILED;
 
-	out_le32(ioaddr, v);
 	return PCIBIOS_SUCCESSFUL;
 }
 
@@ -233,23 +210,31 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
 {
 	struct pci_dev *phys_dev;
 	struct pci_controller *phb, *phys_phb;
-
-	phys_dev = to_pci_dev(afu->adapter->dev.parent);
-	phys_phb = pci_bus_to_host(phys_dev->bus);
+	struct device_node *vphb_dn;
+	struct device *parent;
+
+	if (cpu_has_feature(CPU_FTR_HVMODE)) {
+		phys_dev = to_pci_dev(afu->adapter->dev.parent);
+		phys_phb = pci_bus_to_host(phys_dev->bus);
+		vphb_dn = phys_phb->dn;
+		parent = &phys_dev->dev;
+	} else {
+		vphb_dn = afu->adapter->dev.parent->of_node;
+		parent = afu->adapter->dev.parent;
+	}
 
 	/* Alloc and setup PHB data structure */
-	phb = pcibios_alloc_controller(phys_phb->dn);
-
+	phb = pcibios_alloc_controller(vphb_dn);
 	if (!phb)
 		return -ENODEV;
 
 	/* Setup parent in sysfs */
-	phb->parent = &phys_dev->dev;
+	phb->parent = parent;
 
 	/* Setup the PHB using arch provided callback */
 	phb->ops = &cxl_pcie_pci_ops;
-	phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
-	phb->cfg_data = (void *)(u64)afu->crs_len;
+	phb->cfg_addr = NULL;
+	phb->cfg_data = 0;
 	phb->private_data = afu;
 	phb->controller_ops = cxl_pci_controller_ops;
 
@@ -272,15 +257,6 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
 	return 0;
 }
 
-void cxl_pci_vphb_reconfigure(struct cxl_afu *afu)
-{
-	/* When we are reconfigured, the AFU's MMIO space is unmapped
-	 * and remapped. We need to reflect this in the PHB's view of
-	 * the world.
-	 */
-	afu->phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
-}
-
 void cxl_pci_vphb_remove(struct cxl_afu *afu)
 {
 	struct pci_controller *phb;
@@ -296,6 +272,15 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu)
 	pcibios_free_controller(phb);
 }
 
+bool cxl_pci_is_vphb_device(struct pci_dev *dev)
+{
+	struct pci_controller *phb;
+
+	phb = pci_bus_to_host(dev->bus);
+
+	return (phb->ops == &cxl_pcie_pci_ops);
+}
+
 struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
 {
 	struct pci_controller *phb;
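The pattern running through all of these hunks is the replacement of direct bare-metal calls (cxl_afu_check_and_enable(), __cxl_afu_reset(), cxl_adapter_link_ok(), cxl_afu_cr_read16(), ...) with indirect calls through a cxl_ops table, so the same core driver can be backed either by native PowerNV MMIO accesses or by pHyp hypervisor calls (the new cxl_hcall* trace events above cover the latter). What follows is a minimal, self-contained sketch of that dispatch pattern, not the kernel's actual cxl_backend_ops definition; the names backend_ops, native_ops and guest_ops are illustrative stand-ins:

/* Illustrative sketch only -- not the real cxl_backend_ops wiring.
 * Core code calls through an ops table instead of naming a backend.
 */
#include <stdio.h>
#include <stdbool.h>

struct afu;                               /* opaque to the core code */

struct backend_ops {                      /* stands in for struct cxl_backend_ops */
	int  (*afu_reset)(struct afu *afu);
	bool (*link_ok)(struct afu *afu);
};

/* Bare-metal ("native") backend: the kernel version pokes MMIO directly. */
static int native_afu_reset(struct afu *afu) { (void)afu; puts("native reset"); return 0; }
static bool native_link_ok(struct afu *afu)  { (void)afu; return true; }
static const struct backend_ops native_ops = {
	.afu_reset = native_afu_reset,
	.link_ok   = native_link_ok,
};

/* Guest backend: the kernel version issues hcalls to the hypervisor. */
static int guest_afu_reset(struct afu *afu) { (void)afu; puts("hcall reset"); return 0; }
static bool guest_link_ok(struct afu *afu)  { (void)afu; return true; }
static const struct backend_ops guest_ops = {
	.afu_reset = guest_afu_reset,
	.link_ok   = guest_link_ok,
};

/* One table is selected at probe time; thereafter every call goes through
 * it, just as the diff rewrites cxl_afu_check_and_enable(afu) into
 * cxl_ops->afu_check_and_enable(afu).
 */
static const struct backend_ops *ops;

int main(void)
{
	int running_bare_metal = 1;       /* cf. cpu_has_feature(CPU_FTR_HVMODE) */

	ops = running_bare_metal ? &native_ops : &guest_ops;
	if (ops->link_ok(NULL))
		return ops->afu_reset(NULL);
	return 1;
}

Under this scheme the vphb config-space accessors above become thin wrappers around cxl_ops->afu_cr_read{8,16,32} and cxl_ops->afu_cr_write{8,16,32}, and callers never need to know which backend they are running on.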