Diffstat (limited to 'drivers/misc/cxl/native.c')
-rw-r--r--	drivers/misc/cxl/native.c	60
1 file changed, 55 insertions, 5 deletions
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 387fcbdf9793..55d8a1459f28 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -14,6 +14,7 @@
 #include <linux/mutex.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <linux/delay.h>
 #include <asm/synch.h>
 #include <misc/cxl-base.h>
 
@@ -185,16 +186,25 @@ static int spa_max_procs(int spa_size)
 
 int cxl_alloc_spa(struct cxl_afu *afu)
 {
+	unsigned spa_size;
+
 	/* Work out how many pages to allocate */
 	afu->native->spa_order = 0;
 	do {
 		afu->native->spa_order++;
-		afu->native->spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
+		spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
+
+		if (spa_size > 0x100000) {
+			dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
+					afu->native->spa_max_procs, afu->native->spa_size);
+			afu->num_procs = afu->native->spa_max_procs;
+			break;
+		}
+
+		afu->native->spa_size = spa_size;
 		afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
 	} while (afu->native->spa_max_procs < afu->num_procs);
 
-	WARN_ON(afu->native->spa_size > 0x100000); /* Max size supported by the hardware */
-
 	if (!(afu->native->spa = (struct cxl_process_element *)
 	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
 		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
@@ -485,8 +495,9 @@ static u64 calculate_sr(struct cxl_context *ctx)
 	if (mfspr(SPRN_LPCR) & LPCR_TC)
 		sr |= CXL_PSL_SR_An_TC;
 	if (ctx->kernel) {
-		sr |= CXL_PSL_SR_An_R | (mfmsr() & MSR_SF);
-		sr |= CXL_PSL_SR_An_HV;
+		if (!ctx->real_mode)
+			sr |= CXL_PSL_SR_An_R;
+		sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
 	} else {
 		sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
 		sr &= ~(CXL_PSL_SR_An_HV);
@@ -525,6 +536,15 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
 	ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
 	ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);
 
+	/*
+	 * Ensure we have the multiplexed PSL interrupt set up to take faults
+	 * for kernel contexts that may not have allocated any AFU IRQs at all:
+	 */
+	if (ctx->irqs.range[0] == 0) {
+		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
+		ctx->irqs.range[0] = 1;
+	}
+
 	for (r = 0; r < CXL_IRQ_RANGES; r++) {
 		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
 		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
@@ -797,6 +817,35 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
 	return fail_psl_irq(afu, &irq_info);
 }
 
+void native_irq_wait(struct cxl_context *ctx)
+{
+	u64 dsisr;
+	int timeout = 1000;
+	int ph;
+
+	/*
+	 * Wait until no further interrupts are presented by the PSL
+	 * for this context.
+	 */
+	while (timeout--) {
+		ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
+		if (ph != ctx->pe)
+			return;
+		dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
+		if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
+			return;
+		/*
+		 * We are waiting for the workqueue to process our
+		 * irq, so need to let that run here.
+		 */
+		msleep(1);
+	}
+
+	dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
+		 " DSISR %016llx!\n", ph, dsisr);
+	return;
+}
+
 static irqreturn_t native_slice_irq_err(int irq, void *data)
 {
 	struct cxl_afu *afu = data;
@@ -1076,6 +1125,7 @@ const struct cxl_backend_ops cxl_native_ops = {
 	.handle_psl_slice_error = native_handle_psl_slice_error,
 	.psl_interrupt = NULL,
 	.ack_irq = native_ack_irq,
+	.irq_wait = native_irq_wait,
 	.attach_process = native_attach_process,
 	.detach_process = native_detach_process,
 	.support_attributes = native_support_attributes,
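Note on the new irq_wait backend op: native_irq_wait() polls CXL_PSL_PEHandle_An and CXL_PSL_DSISR_An so a caller can block until the PSL has no interrupt still pending for the context. This diff only adds the op to cxl_native_ops; the call site is not shown here. A minimal sketch of how common cxl code might use it is below, assuming cxl_ops is the driver's global backend-ops pointer and that the call happens somewhere in the context/IRQ teardown path (both assumptions, not confirmed by this diff):

	/*
	 * Illustrative only, not part of this change: before releasing a
	 * context's IRQ resources, drain any interrupt the PSL still has
	 * outstanding for it so the handler is not left racing teardown.
	 */
	if (cxl_ops->irq_wait)
		cxl_ops->irq_wait(ctx);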