Diffstat (limited to 'drivers/crypto/hisilicon/qm.c')
-rw-r--r-- | drivers/crypto/hisilicon/qm.c | 906 |
1 file changed, 547 insertions(+), 359 deletions(-)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index ad83c194d664..8b387de69d22 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -22,20 +22,17 @@
 #define QM_VF_AEQ_INT_MASK		0x4
 #define QM_VF_EQ_INT_SOURCE		0x8
 #define QM_VF_EQ_INT_MASK		0xc
-#define QM_IRQ_NUM_V1			1
-#define QM_IRQ_NUM_PF_V2		4
-#define QM_IRQ_NUM_VF_V2		2
-#define QM_IRQ_NUM_VF_V3		3
-#define QM_EQ_EVENT_IRQ_VECTOR		0
-#define QM_AEQ_EVENT_IRQ_VECTOR		1
-#define QM_CMD_EVENT_IRQ_VECTOR		2
-#define QM_ABNORMAL_EVENT_IRQ_VECTOR	3
+#define QM_IRQ_VECTOR_MASK		GENMASK(15, 0)
+#define QM_IRQ_TYPE_MASK		GENMASK(15, 0)
+#define QM_IRQ_TYPE_SHIFT		16
+#define QM_ABN_IRQ_TYPE_MASK		GENMASK(7, 0)
 
 /* mailbox */
 #define QM_MB_PING_ALL_VFS		0xffff
 #define QM_MB_CMD_DATA_SHIFT		32
 #define QM_MB_CMD_DATA_MASK		GENMASK(31, 0)
+#define QM_MB_STATUS_MASK		GENMASK(12, 9)
 
 /* sqc shift */
 #define QM_SQ_HOP_NUM_SHIFT		0
@@ -77,6 +74,9 @@
 #define QM_EQ_OVERFLOW			1
 #define QM_CQE_ERROR			2
 
+#define QM_XQ_DEPTH_SHIFT		16
+#define QM_XQ_DEPTH_MASK		GENMASK(15, 0)
+
 #define QM_DOORBELL_CMD_SQ		0
 #define QM_DOORBELL_CMD_CQ		1
 #define QM_DOORBELL_CMD_EQ		2
@@ -86,11 +86,7 @@
 #define QM_DB_CMD_SHIFT_V1		16
 #define QM_DB_INDEX_SHIFT_V1		32
 #define QM_DB_PRIORITY_SHIFT_V1		48
-#define QM_QUE_ISO_CFG_V		0x0030
 #define QM_PAGE_SIZE			0x0034
-#define QM_QUE_ISO_EN			0x100154
-#define QM_CAPBILITY			0x100158
-#define QM_QP_NUN_MASK			GENMASK(10, 0)
 #define QM_QP_DB_INTERVAL		0x10000
 
 #define QM_MEM_START_INIT		0x100040
@@ -126,7 +122,6 @@
 #define QM_DFX_CNT_CLR_CE		0x100118
 
 #define QM_ABNORMAL_INT_SOURCE		0x100000
-#define QM_ABNORMAL_INT_SOURCE_CLR	GENMASK(14, 0)
 #define QM_ABNORMAL_INT_MASK		0x100004
 #define QM_ABNORMAL_INT_MASK_VALUE	0x7fff
 #define QM_ABNORMAL_INT_STATUS		0x100008
@@ -144,8 +139,10 @@
 #define QM_RAS_NFE_ENABLE		0x1000f4
 #define QM_RAS_CE_THRESHOLD		0x1000f8
 #define QM_RAS_CE_TIMES_PER_IRQ		1
-#define QM_RAS_MSI_INT_SEL		0x1040f4
 #define QM_OOO_SHUTDOWN_SEL		0x1040f8
+#define QM_ECC_MBIT			BIT(2)
+#define QM_DB_TIMEOUT			BIT(10)
+#define QM_OF_FIFO_OF			BIT(11)
 
 #define QM_RESET_WAIT_TIMEOUT		400
 #define QM_PEH_VENDOR_ID		0x1000d8
@@ -205,6 +202,8 @@
 #define MAX_WAIT_COUNTS			1000
 #define QM_CACHE_WB_START		0x204
 #define QM_CACHE_WB_DONE		0x208
+#define QM_FUNC_CAPS_REG		0x3100
+#define QM_CAPBILITY_VERSION		GENMASK(7, 0)
 
 #define PCI_BAR_2			2
 #define PCI_BAR_4			4
@@ -221,7 +220,6 @@
 #define WAIT_PERIOD			20
 #define REMOVE_WAIT_DELAY		10
 #define QM_SQE_ADDR_MASK		GENMASK(7, 0)
-#define QM_EQ_DEPTH			(1024 * 2)
 
 #define QM_DRIVER_REMOVING		0
 #define QM_RST_SCHED			1
@@ -270,8 +268,8 @@
 	((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) |	\
 	((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
 
-#define QM_MK_CQC_DW3_V2(cqe_sz) \
-	((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
+#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
+	((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
 
 #define QM_MK_SQC_W13(priority, orders, alg_type) \
 	(((priority) << QM_SQ_PRIORITY_SHIFT) |	\
@@ -284,8 +282,8 @@
 	((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) |	\
 	((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
 
-#define QM_MK_SQC_DW3_V2(sqe_sz) \
-	((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
+#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
+	((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
 
 #define INIT_QC_COMMON(qc, base, pasid) do {			\
 	(qc)->head = 0;						\
@@ -329,6 +327,48 @@ enum qm_mb_cmd {
 	QM_VF_GET_QOS,
 };
 
+enum qm_basic_type {
+	QM_TOTAL_QP_NUM_CAP = 0x0,
+	QM_FUNC_MAX_QP_CAP,
+	QM_XEQ_DEPTH_CAP,
+	QM_QP_DEPTH_CAP,
+	QM_EQ_IRQ_TYPE_CAP,
+	QM_AEQ_IRQ_TYPE_CAP,
+	QM_ABN_IRQ_TYPE_CAP,
+	QM_PF2VF_IRQ_TYPE_CAP,
+	QM_PF_IRQ_NUM_CAP,
+	QM_VF_IRQ_NUM_CAP,
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
+	{QM_SUPPORT_DB_ISOLATION, 0x30,   0, BIT(0),  0x0, 0x0, 0x0},
+	{QM_SUPPORT_FUNC_QOS,     0x3100, 0, BIT(8),  0x0, 0x0, 0x1},
+	{QM_SUPPORT_STOP_QP,      0x3100, 0, BIT(9),  0x0, 0x0, 0x1},
+	{QM_SUPPORT_MB_COMMAND,   0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
+	{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
+	{QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
+	{QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
+};
+
+static const struct hisi_qm_cap_info qm_basic_info[] = {
+	{QM_TOTAL_QP_NUM_CAP,   0x100158, 0,  GENMASK(10, 0), 0x1000,    0x400,     0x400},
+	{QM_FUNC_MAX_QP_CAP,    0x100158, 11, GENMASK(10, 0), 0x1000,    0x400,     0x400},
+	{QM_XEQ_DEPTH_CAP,      0x3104,   0,  GENMASK(15, 0), 0x800,     0x4000800, 0x4000800},
+	{QM_QP_DEPTH_CAP,       0x3108,   0,  GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
+	{QM_EQ_IRQ_TYPE_CAP,    0x310c,   0,  GENMASK(31, 0), 0x10000,   0x10000,   0x10000},
+	{QM_AEQ_IRQ_TYPE_CAP,   0x3110,   0,  GENMASK(31, 0), 0x0,       0x10001,   0x10001},
+	{QM_ABN_IRQ_TYPE_CAP,   0x3114,   0,  GENMASK(31, 0), 0x0,       0x10003,   0x10003},
+	{QM_PF2VF_IRQ_TYPE_CAP, 0x3118,   0,  GENMASK(31, 0), 0x0,       0x0,       0x10002},
+	{QM_PF_IRQ_NUM_CAP,     0x311c,   16, GENMASK(15, 0), 0x1,       0x4,       0x4},
+	{QM_VF_IRQ_NUM_CAP,     0x311c,   0,  GENMASK(15, 0), 0x1,       0x2,       0x3},
+};
+
 struct qm_cqe {
 	__le32 rsvd0;
 	__le16 cmd_id;
@@ -421,15 +461,11 @@ struct hisi_qm_hw_ops {
 	int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
 	void (*qm_db)(struct hisi_qm *qm, u16 qn,
 		      u8 cmd, u16 index, u8 priority);
-	u32 (*get_irq_num)(struct hisi_qm *qm);
 	int (*debug_init)(struct hisi_qm *qm);
-	void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
+	void (*hw_error_init)(struct hisi_qm *qm);
 	void (*hw_error_uninit)(struct hisi_qm *qm);
 	enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
-	int (*stop_qp)(struct hisi_qp *qp);
 	int (*set_msi)(struct hisi_qm *qm, bool set);
-	int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd);
-	int (*ping_pf)(struct hisi_qm *qm, u64 cmd);
 };
 
 struct qm_dfx_item {
@@ -533,6 +569,8 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
 	{50100, 100000, 19}
 };
 
+static void qm_irqs_unregister(struct hisi_qm *qm);
+
 static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
 {
 	enum qm_state curr = atomic_read(&qm->status.flags);
@@ -623,22 +661,17 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm)
 }
 
 /* Check if the error causes the master ooo block */
-static int qm_check_dev_error(struct hisi_qm *qm)
+static bool qm_check_dev_error(struct hisi_qm *qm)
 {
 	u32 val, dev_val;
 
 	if (qm->fun_type == QM_HW_VF)
-		return 0;
+		return false;
 
-	val = qm_get_hw_error_status(qm);
-	dev_val = qm_get_dev_err_status(qm);
+	val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
+	dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
 
-	if (qm->ver < QM_HW_V3)
-		return (val & QM_ECC_MBIT) ||
-		       (dev_val & qm->err_info.ecc_2bits_mask);
-
-	return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
-	       (dev_val & (~qm->err_info.dev_ce_mask));
+	return val || dev_val;
 }
 
 static int qm_wait_reset_finish(struct hisi_qm *qm)
@@ -728,8 +761,12 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
 
 static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
 {
+	int ret;
+	u32 val;
+
 	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
 		dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
+		ret = -EBUSY;
 		goto mb_busy;
 	}
 
@@ -737,6 +774,14 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
 
 	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
 		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
+		ret = -ETIMEDOUT;
+		goto mb_busy;
+	}
+
+	val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
+	if (val & QM_MB_STATUS_MASK) {
+		dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
+		ret = -EIO;
 		goto mb_busy;
 	}
 
@@ -744,7 +789,7 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
 
 mb_busy:
 	atomic64_inc(&qm->debug.dfx.mb_err_cnt);
-	return -EBUSY;
+	return ret;
 }
 
 int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
@@ -828,25 +873,52 @@ static int qm_dev_mem_reset(struct hisi_qm *qm)
 					  POLL_TIMEOUT);
 }
 
-static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
+/**
+ * hisi_qm_get_hw_info() - Get device information.
+ * @qm: The qm which wants to get information.
+ * @info_table: Array for storing device information.
+ * @index: Index in info_table.
+ * @is_read: Whether read from reg, 0: not support read from reg.
+ *
+ * This function returns device information the caller needs.
+ */
+u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
+			const struct hisi_qm_cap_info *info_table,
+			u32 index, bool is_read)
 {
-	return QM_IRQ_NUM_V1;
+	u32 val;
+
+	switch (qm->ver) {
+	case QM_HW_V1:
+		return info_table[index].v1_val;
+	case QM_HW_V2:
+		return info_table[index].v2_val;
+	default:
+		if (!is_read)
+			return info_table[index].v3_val;
+
+		val = readl(qm->io_base + info_table[index].offset);
+		return (val >> info_table[index].shift) & info_table[index].mask;
+	}
 }
+EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);
 
-static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
+static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
+			     u16 *high_bits, enum qm_basic_type type)
 {
-	if (qm->fun_type == QM_HW_PF)
-		return QM_IRQ_NUM_PF_V2;
-	else
-		return QM_IRQ_NUM_VF_V2;
+	u32 depth;
+
+	depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
+	*low_bits = depth & QM_XQ_DEPTH_MASK;
+	*high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
 }
 
-static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
+static u32 qm_get_irq_num(struct hisi_qm *qm)
 {
 	if (qm->fun_type == QM_HW_PF)
-		return QM_IRQ_NUM_PF_V2;
+		return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);
 
-	return QM_IRQ_NUM_VF_V3;
+	return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
 }
 
 static int qm_pm_get_sync(struct hisi_qm *qm)
@@ -854,7 +926,7 @@ static int qm_pm_get_sync(struct hisi_qm *qm)
 	struct device *dev = &qm->pdev->dev;
 	int ret;
 
-	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
 		return 0;
 
 	ret = pm_runtime_resume_and_get(dev);
@@ -870,7 +942,7 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
 {
 	struct device *dev = &qm->pdev->dev;
 
-	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
 		return;
 
 	pm_runtime_mark_last_busy(dev);
@@ -879,7 +951,7 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
 
 static void qm_cq_head_update(struct hisi_qp *qp)
 {
-	if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
+	if (qp->qp_status.cq_head == qp->cq_depth - 1) {
 		qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
 		qp->qp_status.cq_head = 0;
 	} else {
@@ -911,6 +983,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
 {
 	struct hisi_qm *qm = poll_data->qm;
 	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
+	u16 eq_depth = qm->eq_depth;
 	int eqe_num = 0;
 	u16 cqn;
 
@@ -919,7 +992,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
 		poll_data->qp_finish_id[eqe_num] = cqn;
 		eqe_num++;
 
-		if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
+		if (qm->status.eq_head == eq_depth - 1) {
 			qm->status.eqc_phase = !qm->status.eqc_phase;
 			eqe = qm->eqe;
 			qm->status.eq_head = 0;
@@ -928,7 +1001,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
 			qm->status.eq_head++;
 		}
 
-		if (eqe_num == (QM_EQ_DEPTH >> 1) - 1)
+		if (eqe_num == (eq_depth >> 1) - 1)
 			break;
 	}
 
@@ -1068,6 +1141,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
 {
 	struct hisi_qm *qm = data;
 	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
+	u16 aeq_depth = qm->aeq_depth;
 	u32 type, qp_id;
 
 	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
@@ -1092,7 +1166,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
 			break;
 		}
 
-		if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
+		if (qm->status.aeq_head == aeq_depth - 1) {
 			qm->status.aeqc_phase = !qm->status.aeqc_phase;
 			aeqe = qm->aeqe;
 			qm->status.aeq_head = 0;
@@ -1118,24 +1192,6 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
 	return IRQ_WAKE_THREAD;
 }
 
-static void qm_irq_unregister(struct hisi_qm *qm)
-{
-	struct pci_dev *pdev = qm->pdev;
-
-	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
-
-	if (qm->ver > QM_HW_V1) {
-		free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
-
-		if (qm->fun_type == QM_HW_PF)
-			free_irq(pci_irq_vector(pdev,
-				 QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
-	}
-
-	if (qm->ver > QM_HW_V2)
-		free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm);
-}
-
 static void qm_init_qp_status(struct hisi_qp *qp)
 {
 	struct hisi_qp_status *qp_status = &qp->qp_status;
@@ -1151,7 +1207,7 @@ static void qm_init_prefetch(struct hisi_qm *qm)
 	struct device *dev = &qm->pdev->dev;
 	u32 page_type = 0x0;
 
-	if (qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
 		return;
 
 	switch (PAGE_SIZE) {
@@ -1270,7 +1326,7 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
 		}
 		break;
 	case SHAPER_VFT:
-		if (qm->ver >= QM_HW_V3) {
+		if (factor) {
 			tmp = factor->cir_b |
 			(factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
 			(factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
@@ -1288,10 +1344,13 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
 			     u32 fun_num, u32 base, u32 number)
 {
-	struct qm_shaper_factor *factor = &qm->factor[fun_num];
+	struct qm_shaper_factor *factor = NULL;
 	unsigned int val;
 	int ret;
 
+	if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+		factor = &qm->factor[fun_num];
+
 	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
 					 val & BIT(0), POLL_PERIOD,
 					 POLL_TIMEOUT);
@@ -1349,7 +1408,7 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
 	}
 
 	/* init default shaper qos val */
-	if (qm->ver >= QM_HW_V3) {
+	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
 		ret = qm_shaper_init_vft(qm, fun_num);
 		if (ret)
 			goto back_sqc_cqc;
@@ -1357,11 +1416,9 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
 	return 0;
 
 back_sqc_cqc:
-	for (i = SQC_VFT; i <= CQC_VFT; i++) {
-		ret = qm_set_vft_common(qm, i, fun_num, 0, 0);
-		if (ret)
-			return ret;
-	}
+	for (i = SQC_VFT; i <= CQC_VFT; i++)
+		qm_set_vft_common(qm, i, fun_num, 0, 0);
+
 	return ret;
 }
 
@@ -1857,39 +1914,19 @@ static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
 	kfree(ctx_addr);
 }
 
-static int dump_show(struct hisi_qm *qm, void *info,
+static void dump_show(struct hisi_qm *qm, void *info,
 		     unsigned int info_size, char *info_name)
 {
 	struct device *dev = &qm->pdev->dev;
-	u8 *info_buf, *info_curr = info;
+	u8 *info_curr = info;
 	u32 i;
 #define BYTE_PER_DW	4
 
-	info_buf = kzalloc(info_size, GFP_KERNEL);
-	if (!info_buf)
-		return -ENOMEM;
-
-	for (i = 0; i < info_size; i++, info_curr++) {
-		if (i % BYTE_PER_DW == 0)
-			info_buf[i + 3UL] = *info_curr;
-		else if (i % BYTE_PER_DW == 1)
-			info_buf[i + 1UL] = *info_curr;
-		else if (i % BYTE_PER_DW == 2)
-			info_buf[i - 1] = *info_curr;
-		else if (i % BYTE_PER_DW == 3)
-			info_buf[i - 3] = *info_curr;
-	}
-
 	dev_info(dev, "%s DUMP\n", info_name);
-	for (i = 0; i < info_size; i += BYTE_PER_DW) {
+	for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
 		pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
-			info_buf[i], info_buf[i + 1UL],
-			info_buf[i + 2UL], info_buf[i + 3UL]);
+			*(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
 	}
-
-	kfree(info_buf);
-
-	return 0;
 }
 
 static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
@@ -1929,23 +1966,18 @@ static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
 		if (qm->sqc) {
 			sqc_curr = qm->sqc + qp_id;
 
-			ret = dump_show(qm, sqc_curr, sizeof(*sqc),
-					"SOFT SQC");
-			if (ret)
-				dev_info(dev, "Show soft sqc failed!\n");
+			dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
 		}
 		up_read(&qm->qps_lock);
 
-		goto err_free_ctx;
+		goto free_ctx;
 	}
 
-	ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
-	if (ret)
-		dev_info(dev, "Show hw sqc failed!\n");
+	dump_show(qm, sqc, sizeof(*sqc), "SQC");
 
-err_free_ctx:
+free_ctx:
 	qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
-	return ret;
+	return 0;
 }
 
 static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
@@ -1975,23 +2007,18 @@ static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
 		if (qm->cqc) {
 			cqc_curr = qm->cqc + qp_id;
 
-			ret = dump_show(qm, cqc_curr, sizeof(*cqc),
-					"SOFT CQC");
-			if (ret)
-				dev_info(dev, "Show soft cqc failed!\n");
+			dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
 		}
 		up_read(&qm->qps_lock);
 
-		goto err_free_ctx;
+		goto free_ctx;
 	}
 
-	ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
-	if (ret)
-		dev_info(dev, "Show hw cqc failed!\n");
+	dump_show(qm, cqc, sizeof(*cqc), "CQC");
 
-err_free_ctx:
+free_ctx:
 	qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
-	return ret;
+	return 0;
 }
 
 static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
@@ -2015,9 +2042,7 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
 	if (ret)
 		goto err_free_ctx;
 
-	ret = dump_show(qm, xeqc, size, name);
-	if (ret)
-		dev_info(dev, "Show hw %s failed!\n", name);
+	dump_show(qm, xeqc, size, name);
 
 err_free_ctx:
 	qm_ctx_free(qm, size, xeqc, &xeqc_dma);
@@ -2025,7 +2050,7 @@ err_free_ctx:
 }
 
 static int q_dump_param_parse(struct hisi_qm *qm, char *s,
-			      u32 *e_id, u32 *q_id)
+			      u32 *e_id, u32 *q_id, u16 q_depth)
 {
 	struct device *dev = &qm->pdev->dev;
 	unsigned int qp_num = qm->qp_num;
@@ -2051,8 +2076,8 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
 	}
 
 	ret = kstrtou32(presult, 0, e_id);
-	if (ret || *e_id >= QM_Q_DEPTH) {
-		dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
+	if (ret || *e_id >= q_depth) {
+		dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
 		return -EINVAL;
 	}
 
@@ -2066,54 +2091,49 @@ static int qm_sq_dump(struct hisi_qm *qm, char *s)
 
 static int qm_sq_dump(struct hisi_qm *qm, char *s)
 {
-	struct device *dev = &qm->pdev->dev;
+	u16 sq_depth = qm->qp_array->cq_depth;
 	void *sqe, *sqe_curr;
 	struct hisi_qp *qp;
 	u32 qp_id, sqe_id;
 	int ret;
 
-	ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
+	ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
 	if (ret)
 		return ret;
 
-	sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
+	sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
 	if (!sqe)
 		return -ENOMEM;
 
 	qp = &qm->qp_array[qp_id];
-	memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
+	memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
 	sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
 	memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
 	       qm->debug.sqe_mask_len);
 
-	ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
-	if (ret)
-		dev_info(dev, "Show sqe failed!\n");
+	dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
 
 	kfree(sqe);
 
-	return ret;
+	return 0;
 }
 
 static int qm_cq_dump(struct hisi_qm *qm, char *s)
 {
-	struct device *dev = &qm->pdev->dev;
 	struct qm_cqe *cqe_curr;
 	struct hisi_qp *qp;
 	u32 qp_id, cqe_id;
 	int ret;
 
-	ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
+	ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
 	if (ret)
 		return ret;
 
 	qp = &qm->qp_array[qp_id];
 	cqe_curr = qp->cqe + cqe_id;
-	ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
-	if (ret)
-		dev_info(dev, "Show cqe failed!\n");
+	dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
 
-	return ret;
+	return 0;
}
 
 static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
@@ -2131,11 +2151,11 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
 	if (ret)
 		return -EINVAL;
 
-	if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
-		dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
+	if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) {
+		dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1);
 		return -EINVAL;
-	} else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
-		dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
+	} else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) {
+		dev_err(dev, "Please input aeqe num (0-%u)", qm->aeq_depth - 1);
 		return -EINVAL;
 	}
 
@@ -2150,9 +2170,7 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
 		goto err_unlock;
 	}
 
-	ret = dump_show(qm, xeqe, size, name);
-	if (ret)
-		dev_info(dev, "Show %s failed!\n", name);
+	dump_show(qm, xeqe, size, name);
 
 err_unlock:
 	up_read(&qm->qps_lock);
@@ -2245,8 +2263,10 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
 		return ret;
 
 	/* Judge if the instance is being reset. */
-	if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
-		return 0;
+	if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
+		ret = 0;
+		goto put_dfx_access;
+	}
 
 	if (count > QM_DBG_WRITE_LEN) {
 		ret = -ENOSPC;
@@ -2300,58 +2320,65 @@ static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
 	file->debug = &qm->debug;
 }
 
-static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v1(struct hisi_qm *qm)
 {
 	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
 }
 
-static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_cfg(struct hisi_qm *qm)
 {
-	qm->error_mask = ce | nfe | fe;
+	struct hisi_qm_err_info *err_info = &qm->err_info;
+
+	qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
 	/* clear QM hw residual error source */
-	writel(QM_ABNORMAL_INT_SOURCE_CLR,
-	       qm->io_base + QM_ABNORMAL_INT_SOURCE);
+	writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
 
 	/* configure error type */
-	writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
+	writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
 	writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
-	writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
-	writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
+	writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+	writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
 }
 
-static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v2(struct hisi_qm *qm)
 {
-	u32 irq_enable = ce | nfe | fe;
-	u32 irq_unmask = ~irq_enable;
+	u32 irq_unmask;
 
-	qm_hw_error_cfg(qm, ce, nfe, fe);
+	qm_hw_error_cfg(qm);
 
+	irq_unmask = ~qm->error_mask;
 	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
 	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
 }
 
 static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
 {
-	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+	u32 irq_mask = qm->error_mask;
+
+	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
+	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
 }
 
-static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v3(struct hisi_qm *qm)
 {
-	u32 irq_enable = ce | nfe | fe;
-	u32 irq_unmask = ~irq_enable;
+	u32 irq_unmask;
 
-	qm_hw_error_cfg(qm, ce, nfe, fe);
+	qm_hw_error_cfg(qm);
 
 	/* enable close master ooo when hardware error happened */
-	writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL);
+	writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
 
+	irq_unmask = ~qm->error_mask;
 	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
 	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
 }
 
 static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
 {
-	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+	u32 irq_mask = qm->error_mask;
+
+	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
+	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
 
 	/* disable close master ooo when hardware error happened */
 	writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
@@ -2396,7 +2423,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
 
 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
 {
-	u32 error_status, tmp, val;
+	u32 error_status, tmp;
 
 	/* read err sts */
 	tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
@@ -2407,17 +2434,11 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
 			qm->err_status.is_qm_ecc_mbit = true;
 
 		qm_log_hw_error(qm, error_status);
-		val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE;
-		/* ce error does not need to be reset */
-		if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
-			writel(error_status, qm->io_base +
-			       QM_ABNORMAL_INT_SOURCE);
-			writel(qm->err_info.nfe,
-			       qm->io_base + QM_RAS_NFE_ENABLE);
-			return ACC_ERR_RECOVERED;
-		}
+		if (error_status & qm->err_info.qm_reset_mask)
+			return ACC_ERR_NEED_RESET;
 
-		return ACC_ERR_NEED_RESET;
+		writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+		writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
 	}
 
 	return ACC_ERR_RECOVERED;
@@ -2493,7 +2514,7 @@ static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
 	u64 val;
 	u32 i;
 
-	if (!qm->vfs_num || qm->ver < QM_HW_V3)
+	if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
 		return 0;
 
 	while (true) {
@@ -2756,7 +2777,6 @@ static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
 
 static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
 	.qm_db = qm_db_v1,
-	.get_irq_num = qm_get_irq_num_v1,
 	.hw_error_init = qm_hw_error_init_v1,
 	.set_msi = qm_set_msi,
 };
@@ -2764,7 +2784,6 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
 static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
 	.get_vft = qm_get_vft_v2,
 	.qm_db = qm_db_v2,
-	.get_irq_num = qm_get_irq_num_v2,
 	.hw_error_init = qm_hw_error_init_v2,
 	.hw_error_uninit = qm_hw_error_uninit_v2,
 	.hw_error_handle = qm_hw_error_handle_v2,
@@ -2774,14 +2793,10 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
 static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
 	.get_vft = qm_get_vft_v2,
 	.qm_db = qm_db_v2,
-	.get_irq_num = qm_get_irq_num_v3,
 	.hw_error_init = qm_hw_error_init_v3,
 	.hw_error_uninit = qm_hw_error_uninit_v3,
 	.hw_error_handle = qm_hw_error_handle_v2,
-	.stop_qp = qm_stop_qp,
 	.set_msi = qm_set_msi_v3,
-	.ping_all_vfs = qm_ping_all_vfs,
-	.ping_pf = qm_ping_pf,
 };
 
 static void *qm_get_avail_sqe(struct hisi_qp *qp)
@@ -2789,7 +2804,7 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp)
 	struct hisi_qp_status *qp_status = &qp->qp_status;
 	u16 sq_tail = qp_status->sq_tail;
 
-	if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
+	if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1))
 		return NULL;
 
 	return qp->sqe + sq_tail * qp->qm->sqe_size;
@@ -2830,7 +2845,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
 
 	qp = &qm->qp_array[qp_id];
 	hisi_qm_unset_hw_reset(qp);
-	memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
+	memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);
 
 	qp->event_cb = NULL;
 	qp->req_cb = NULL;
@@ -2911,9 +2926,9 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
 	INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
 	if (ver == QM_HW_V1) {
 		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
-		sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
+		sqc->w8 = cpu_to_le16(qp->sq_depth - 1);
 	} else {
-		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
+		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
 		sqc->w8 = 0; /* rand_qc */
 	}
 	sqc->cq_num = cpu_to_le16(qp_id);
@@ -2954,9 +2969,9 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
 	if (ver == QM_HW_V1) {
 		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
							QM_QC_CQE_SIZE));
-		cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
+		cqc->w8 = cpu_to_le16(qp->cq_depth - 1);
 	} else {
-		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE));
+		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
 		cqc->w8 = 0; /* rand_qc */
 	}
 	cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
@@ -3043,13 +3058,14 @@ static void qp_stop_fail_cb(struct hisi_qp *qp)
 {
 	int qp_used = atomic_read(&qp->qp_status.used);
 	u16 cur_tail = qp->qp_status.sq_tail;
-	u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH;
+	u16 sq_depth = qp->sq_depth;
+	u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth;
 	struct hisi_qm *qm = qp->qm;
 	u16 pos;
 	int i;
 
 	for (i = 0; i < qp_used; i++) {
-		pos = (i + cur_head) % QM_Q_DEPTH;
+		pos = (i + cur_head) % sq_depth;
 		qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
 		atomic_dec(&qp->qp_status.used);
 	}
@@ -3078,8 +3094,8 @@ static int qm_drain_qp(struct hisi_qp *qp)
 		return 0;
 
 	/* Kunpeng930 supports drain qp by device */
-	if (qm->ops->stop_qp) {
-		ret = qm->ops->stop_qp(qp);
+	if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
+		ret = qm_stop_qp(qp);
 		if (ret)
 			dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
 		return ret;
@@ -3197,7 +3213,7 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg)
 {
 	struct hisi_qp_status *qp_status = &qp->qp_status;
 	u16 sq_tail = qp_status->sq_tail;
-	u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
+	u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth;
 	void *sqe = qm_get_avail_sqe(qp);
 
 	if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
@@ -3286,7 +3302,6 @@ static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
 {
 	struct hisi_qp *qp = q->priv;
 
-	hisi_qm_cache_wb(qp->qm);
 	hisi_qm_release_qp(qp);
 }
 
@@ -3310,7 +3325,7 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
 	if (qm->ver == QM_HW_V1) {
 		if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
 			return -EINVAL;
-	} else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) {
+	} else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
 		if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
 		    QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
 			return -EINVAL;
@@ -3387,6 +3402,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
 				unsigned long arg)
 {
 	struct hisi_qp *qp = q->priv;
+	struct hisi_qp_info qp_info;
 	struct hisi_qp_ctx qp_ctx;
 
 	if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
@@ -3403,11 +3419,25 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
 		if (copy_to_user((void __user *)arg, &qp_ctx,
 				 sizeof(struct hisi_qp_ctx)))
 			return -EFAULT;
-	} else {
-		return -EINVAL;
+
+		return 0;
+	} else if (cmd == UACCE_CMD_QM_SET_QP_INFO) {
+		if (copy_from_user(&qp_info, (void __user *)arg,
+				   sizeof(struct hisi_qp_info)))
+			return -EFAULT;
+
+		qp_info.sqe_size = qp->qm->sqe_size;
+		qp_info.sq_depth = qp->sq_depth;
+		qp_info.cq_depth = qp->cq_depth;
+
+		if (copy_to_user((void __user *)arg, &qp_info,
+				 sizeof(struct hisi_qp_info)))
+			return -EFAULT;
+
+		return 0;
 	}
 
-	return 0;
+	return -EINVAL;
 }
 
 static const struct uacce_ops uacce_qm_ops = {
@@ -3427,6 +3457,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
 	struct uacce_device *uacce;
 	unsigned long mmio_page_nr;
 	unsigned long dus_page_nr;
+	u16 sq_depth, cq_depth;
 	struct uacce_interface interface = {
 		.flags = UACCE_DEV_SVA,
 		.ops = &uacce_qm_ops,
@@ -3453,7 +3484,6 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
 
 	uacce->is_vf = pdev->is_virtfn;
 	uacce->priv = qm;
-	uacce->algs = qm->algs;
 
 	if (qm->ver == QM_HW_V1)
 		uacce->api_ver = HISI_QM_API_VER_BASE;
@@ -3464,15 +3494,17 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
 
 	if (qm->ver == QM_HW_V1)
 		mmio_page_nr = QM_DOORBELL_PAGE_NR;
-	else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation)
+	else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
 		mmio_page_nr = QM_DOORBELL_PAGE_NR +
 			QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
 	else
 		mmio_page_nr = qm->db_interval / PAGE_SIZE;
 
+	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
+
 	/* Add one more page for device or qp status */
-	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
-		       sizeof(struct qm_cqe) * QM_Q_DEPTH + PAGE_SIZE) >>
+	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
+		       sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
 		      PAGE_SHIFT;
 
 	uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
@@ -3577,10 +3609,11 @@ static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
 	kfree(qm->qp_array);
 }
 
-static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
+static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
+			       u16 sq_depth, u16 cq_depth)
 {
 	struct device *dev = &qm->pdev->dev;
-	size_t off = qm->sqe_size * QM_Q_DEPTH;
+	size_t off = qm->sqe_size * sq_depth;
 	struct hisi_qp *qp;
 	int ret = -ENOMEM;
 
@@ -3600,6 +3633,8 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
 	qp->cqe = qp->qdma.va + off;
 	qp->cqe_dma = qp->qdma.dma + off;
 	qp->qdma.size = dma_size;
+	qp->sq_depth = sq_depth;
+	qp->cq_depth = cq_depth;
 	qp->qm = qm;
 	qp->qp_id = id;
 
@@ -3626,7 +3661,7 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
 	init_rwsem(&qm->qps_lock);
 	qm->qp_in_used = 0;
 	qm->misc_ctl = false;
-	if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) {
+	if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
 		if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
 			dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
 	}
@@ -3636,7 +3671,7 @@ static void qm_cmd_uninit(struct hisi_qm *qm)
 {
 	u32 val;
 
-	if (qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
 		return;
 
 	val = readl(qm->io_base + QM_IFC_INT_MASK);
@@ -3648,7 +3683,7 @@ static void qm_cmd_init(struct hisi_qm *qm)
 {
 	u32 val;
 
-	if (qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
 		return;
 
 	/* Clear communication interrupt source */
@@ -3664,7 +3699,7 @@ static void qm_put_pci_res(struct hisi_qm *qm)
 {
 	struct pci_dev *pdev = qm->pdev;
 
-	if (qm->use_db_isolation)
+	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
		iounmap(qm->db_io_base);
 
 	iounmap(qm->io_base);
@@ -3714,7 +3749,9 @@ static void hisi_qm_memory_uninit(struct hisi_qm *qm)
 	}
 
 	idr_destroy(&qm->qp_idr);
-	kfree(qm->factor);
+
+	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+		kfree(qm->factor);
 }
 
 /**
@@ -3740,7 +3777,7 @@ void hisi_qm_uninit(struct hisi_qm *qm)
 	hisi_qm_set_state(qm, QM_NOT_READY);
 	up_write(&qm->qps_lock);
 
-	qm_irq_unregister(qm);
+	qm_irqs_unregister(qm);
 	hisi_qm_pci_uninit(qm);
 	if (qm->use_sva) {
 		uacce_remove(qm->uacce);
@@ -3841,7 +3878,7 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
 	eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
 	if (qm->ver == QM_HW_V1)
 		eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
-	eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+	eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
 
 	eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
 				 DMA_TO_DEVICE);
@@ -3870,7 +3907,7 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
 
 	aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
 	aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
-	aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+	aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
 
 	aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
 				  DMA_TO_DEVICE);
@@ -4136,14 +4173,12 @@ DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
 
 static void qm_hw_error_init(struct hisi_qm *qm)
 {
-	struct hisi_qm_err_info *err_info = &qm->err_info;
-
 	if (!qm->ops->hw_error_init) {
 		dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
 		return;
 	}
 
-	qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
+	qm->ops->hw_error_init(qm);
 }
 
 static void qm_hw_error_uninit(struct hisi_qm *qm)
@@ -4497,12 +4532,10 @@ static int qm_vf_read_qos(struct hisi_qm *qm)
 	qm->mb_qos = 0;
 
 	/* vf ping pf to get function qos */
-	if (qm->ops->ping_pf) {
-		ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS);
-		if (ret) {
-			pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
-			return ret;
-		}
+	ret = qm_ping_pf(qm, QM_VF_GET_QOS);
+	if (ret) {
+		pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
+		return ret;
 	}
 
 	while (true) {
@@ -4674,14 +4707,14 @@ static const struct file_operations qm_algqos_fops = {
  * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
  * @qm: The qm for which we want to add debugfs files.
  *
- * Create function qos debugfs files.
+ * Create function qos debugfs files, VF ping PF to get function qos.
  */
 static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
 {
 	if (qm->fun_type == QM_HW_PF)
 		debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
 				    qm, &qm_algqos_fops);
-	else
+	else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
 		debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
 				    qm, &qm_algqos_fops);
 }
@@ -4729,7 +4762,7 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
 					&qm_atomic64_ops);
 	}
 
-	if (qm->ver >= QM_HW_V3)
+	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
 		hisi_qm_set_algqos_init(qm);
 }
 EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
@@ -4768,6 +4801,14 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
 }
 EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
 
+static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
+{
+	int i;
+
+	for (i = 1; i <= total_func; i++)
+		qm->factor[i].func_qos = QM_QOS_MAX_VAL;
+}
+
 /**
  * hisi_qm_sriov_enable() - enable virtual functions
  * @pdev: the PCIe device
@@ -4794,7 +4835,17 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
 		goto err_put_sync;
 	}
 
-	num_vfs = min_t(int, max_vfs, total_vfs);
+	if (max_vfs > total_vfs) {
+		pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs);
+		ret = -ERANGE;
+		goto err_put_sync;
+	}
+
+	num_vfs = max_vfs;
+
+	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+		hisi_qm_init_vf_qos(qm, num_vfs);
+
 	ret = qm_vf_q_assign(qm, num_vfs);
 	if (ret) {
 		pci_err(pdev, "Can't assign queues for VF!\n");
@@ -4830,7 +4881,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
 int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
 {
 	struct hisi_qm *qm = pci_get_drvdata(pdev);
-	int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
 	int ret;
 
 	if (pci_vfs_assigned(pdev)) {
@@ -4845,8 +4895,7 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
 	}
 
 	pci_disable_sriov(pdev);
-	/* clear vf function shaper configure array */
-	memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
+
 	ret = qm_clear_vft_config(qm);
 	if (ret)
 		return ret;
@@ -4891,17 +4940,11 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
 		if (qm->err_ini->log_dev_hw_err)
 			qm->err_ini->log_dev_hw_err(qm, err_sts);
 
-		/* ce error does not need to be reset */
-		if ((err_sts | qm->err_info.dev_ce_mask) ==
-		     qm->err_info.dev_ce_mask) {
-			if (qm->err_ini->clear_dev_hw_err_status)
-				qm->err_ini->clear_dev_hw_err_status(qm,
-								err_sts);
+		if (err_sts & qm->err_info.dev_reset_mask)
+			return ACC_ERR_NEED_RESET;
 
-			return ACC_ERR_RECOVERED;
-		}
-
-		return ACC_ERR_NEED_RESET;
+		if (qm->err_ini->clear_dev_hw_err_status)
+			qm->err_ini->clear_dev_hw_err_status(qm, err_sts);
 	}
 
 	return ACC_ERR_RECOVERED;
@@ -5070,8 +5113,8 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
 		return 0;
 
 	/* Kunpeng930 supports to notify VFs to stop before PF reset */
-	if (qm->ops->ping_all_vfs) {
-		ret = qm->ops->ping_all_vfs(qm, cmd);
+	if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
+		ret = qm_ping_all_vfs(qm, cmd);
 		if (ret)
 			pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
 	} else {
@@ -5262,8 +5305,8 @@ static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
 	}
 
 	/* Kunpeng930 supports to notify VFs to start after PF reset. */
-	if (qm->ops->ping_all_vfs) {
-		ret = qm->ops->ping_all_vfs(qm, cmd);
+	if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
+		ret = qm_ping_all_vfs(qm, cmd);
 		if (ret)
 			pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
 	} else {
@@ -5466,8 +5509,6 @@ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
 	if (pdev->is_virtfn)
 		return PCI_ERS_RESULT_RECOVERED;
 
-	pci_aer_clear_nonfatal_status(pdev);
-
 	/* reset pcie device controller */
 	ret = qm_controller_reset(qm);
 	if (ret) {
@@ -5599,51 +5640,6 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static int qm_irq_register(struct hisi_qm *qm)
-{
-	struct pci_dev *pdev = qm->pdev;
-	int ret;
-
-	ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
-			  qm_irq, 0, qm->dev_name, qm);
-	if (ret)
-		return ret;
-
-	if (qm->ver > QM_HW_V1) {
-		ret = request_threaded_irq(pci_irq_vector(pdev,
-					   QM_AEQ_EVENT_IRQ_VECTOR),
-					   qm_aeq_irq, qm_aeq_thread,
-					   0, qm->dev_name, qm);
-		if (ret)
-			goto err_aeq_irq;
-
-		if (qm->fun_type == QM_HW_PF) {
-			ret = request_irq(pci_irq_vector(pdev,
-					  QM_ABNORMAL_EVENT_IRQ_VECTOR),
-					  qm_abnormal_irq, 0, qm->dev_name, qm);
-			if (ret)
-				goto err_abonormal_irq;
-		}
-	}
-
-	if (qm->ver > QM_HW_V2) {
-		ret = request_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR),
-				  qm_mb_cmd_irq, 0, qm->dev_name, qm);
-		if (ret)
-			goto err_mb_cmd_irq;
-	}
-
-	return 0;
-
-err_mb_cmd_irq:
-	if (qm->fun_type == QM_HW_PF)
-		free_irq(pci_irq_vector(pdev, QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
-err_abonormal_irq:
-	free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
-err_aeq_irq:
-	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
-	return ret;
-}
 
 /**
  * hisi_qm_dev_shutdown() - Shutdown device.
@@ -5711,7 +5707,7 @@ err_prepare:
 	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
 out:
 	pci_save_state(pdev);
-	ret = qm->ops->ping_pf(qm, cmd);
+	ret = qm_ping_pf(qm, cmd);
 	if (ret)
 		dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n");
 }
@@ -5729,7 +5725,7 @@ static void qm_pf_reset_vf_done(struct hisi_qm *qm)
 		cmd = QM_VF_START_FAIL;
 	}
 
-	ret = qm->ops->ping_pf(qm, cmd);
+	ret = qm_ping_pf(qm, cmd);
 	if (ret)
 		dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");
 
@@ -5924,21 +5920,193 @@ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
 }
 EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
 
+static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+
+	if (qm->fun_type == QM_HW_VF)
+		return;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+		return;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_abnormal_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+	int ret;
+
+	if (qm->fun_type == QM_HW_VF)
+		return 0;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+		return 0;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
+	if (ret)
+		dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);
+
+	return ret;
+}
+
+static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+		return;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+	int ret;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+		return 0;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm);
+	if (ret)
+		dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret);
+
+	return ret;
+}
+
+static void qm_unregister_aeq_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+		return;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_aeq_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+	int ret;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+		return 0;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq,
+				   qm_aeq_thread, 0, qm->dev_name, qm);
+	if (ret)
+		dev_err(&pdev->dev, "failed to request aeq irq, ret = %d", ret);
+
+	return ret;
+}
+
+static void qm_unregister_eq_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+		return;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_eq_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+	int ret;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+		return 0;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_irq, 0, qm->dev_name, qm);
+	if (ret)
+		dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
+
+	return ret;
+}
+
+static void qm_irqs_unregister(struct hisi_qm *qm)
+{
+	qm_unregister_mb_cmd_irq(qm);
+	qm_unregister_abnormal_irq(qm);
+	qm_unregister_aeq_irq(qm);
+	qm_unregister_eq_irq(qm);
+}
+
+static int qm_irqs_register(struct hisi_qm *qm)
+{
+	int ret;
+
+	ret = qm_register_eq_irq(qm);
+	if (ret)
+		return ret;
+
+	ret = qm_register_aeq_irq(qm);
+	if (ret)
+		goto free_eq_irq;
+
+	ret = qm_register_abnormal_irq(qm);
+	if (ret)
+		goto free_aeq_irq;
+
+	ret = qm_register_mb_cmd_irq(qm);
+	if (ret)
+		goto free_abnormal_irq;
+
+	return 0;
+
+free_abnormal_irq:
+	qm_unregister_abnormal_irq(qm);
+free_aeq_irq:
+	qm_unregister_aeq_irq(qm);
+free_eq_irq:
+	qm_unregister_eq_irq(qm);
+	return ret;
+}
+
 static int qm_get_qp_num(struct hisi_qm *qm)
 {
-	if (qm->ver == QM_HW_V1)
-		qm->ctrl_qp_num = QM_QNUM_V1;
-	else if (qm->ver == QM_HW_V2)
-		qm->ctrl_qp_num = QM_QNUM_V2;
-	else
-		qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) &
-					QM_QP_NUN_MASK;
+	bool is_db_isolation;
 
-	if (qm->use_db_isolation)
-		qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >>
-				  QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK;
-	else
-		qm->max_qp_num = qm->ctrl_qp_num;
+	/* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. */
+	if (qm->fun_type == QM_HW_VF) {
+		if (qm->ver != QM_HW_V1)
+			/* v2 starts to support get vft by mailbox */
+			return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+
+		return 0;
+	}
+
+	is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
+	qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true);
+	qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
+					     QM_FUNC_MAX_QP_CAP, is_db_isolation);
 
 	/* check if qp number is valid */
 	if (qm->qp_num > qm->max_qp_num) {
@@ -5950,6 +6118,39 @@ static int qm_get_qp_num(struct hisi_qm *qm)
 	return 0;
 }
 
+static void qm_get_hw_caps(struct hisi_qm *qm)
+{
+	const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
+						  qm_cap_info_pf : qm_cap_info_vf;
+	u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) :
+				   ARRAY_SIZE(qm_cap_info_vf);
+	u32 val, i;
+
+	/* Doorbell isolate register is an independent register. */
+	val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true);
+	if (val)
+		set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
+
+	if (qm->ver >= QM_HW_V3) {
+		val = readl(qm->io_base + QM_FUNC_CAPS_REG);
+		qm->cap_ver = val & QM_CAPBILITY_VERSION;
+	}
+
+	/* Get PF/VF common capability */
+	for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) {
+		val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver);
+		if (val)
+			set_bit(qm_cap_info_comm[i].type, &qm->caps);
+	}
+
+	/* Get PF/VF different capability */
+	for (i = 0; i < size; i++) {
+		val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver);
+		if (val)
+			set_bit(cap_info[i].type, &qm->caps);
+	}
+}
+
 static int qm_get_pci_res(struct hisi_qm *qm)
 {
 	struct pci_dev *pdev = qm->pdev;
@@ -5969,16 +6170,8 @@ static int qm_get_pci_res(struct hisi_qm *qm)
 		goto err_request_mem_regions;
 	}
 
-	if (qm->ver > QM_HW_V2) {
-		if (qm->fun_type == QM_HW_PF)
-			qm->use_db_isolation = readl(qm->io_base +
-						     QM_QUE_ISO_EN) & BIT(0);
-		else
-			qm->use_db_isolation = readl(qm->io_base +
-						     QM_QUE_ISO_CFG_V) & BIT(0);
-	}
-
-	if (qm->use_db_isolation) {
+	qm_get_hw_caps(qm);
+	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
 		qm->db_interval = QM_QP_DB_INTERVAL;
 		qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
 		qm->db_io_base = ioremap(qm->db_phys_base,
@@ -5993,16 +6186,14 @@ static int qm_get_pci_res(struct hisi_qm *qm)
 		qm->db_interval = 0;
 	}
 
-	if (qm->fun_type == QM_HW_PF) {
-		ret = qm_get_qp_num(qm);
-		if (ret)
-			goto err_db_ioremap;
-	}
+	ret = qm_get_qp_num(qm);
+	if (ret)
+		goto err_db_ioremap;
 
 	return 0;
 
 err_db_ioremap:
-	if (qm->use_db_isolation)
+	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
 		iounmap(qm->db_io_base);
 err_ioremap:
 	iounmap(qm->io_base);
@@ -6033,11 +6224,7 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
 		goto err_get_pci_res;
 	pci_set_master(pdev);
 
-	if (!qm->ops->get_irq_num) {
-		ret = -EOPNOTSUPP;
-		goto err_get_pci_res;
-	}
-	num_vec = qm->ops->get_irq_num(qm);
+	num_vec = qm_get_irq_num(qm);
 	ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
 	if (ret < 0) {
 		dev_err(dev, "Failed to enable MSI vectors!\n");
@@ -6080,6 +6267,7 @@ static int hisi_qm_init_work(struct hisi_qm *qm)
 static int hisi_qp_alloc_memory(struct hisi_qm *qm)
 {
 	struct device *dev = &qm->pdev->dev;
+	u16 sq_depth, cq_depth;
 	size_t qp_dma_size;
 	int i, ret;
 
@@ -6093,13 +6281,14 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm)
 		return -ENOMEM;
 	}
 
+	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
+
 	/* one more page for device or qp statuses */
-	qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
-		      sizeof(struct qm_cqe) * QM_Q_DEPTH;
+	qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth;
 	qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
 	for (i = 0; i < qm->qp_num; i++) {
 		qm->poll_data[i].qm = qm;
-		ret = hisi_qp_memory_init(qm, qp_dma_size, i);
+		ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth);
 		if (ret)
 			goto err_init_qp_mem;
 
@@ -6116,15 +6305,18 @@ err_init_qp_mem:
 static int hisi_qm_memory_init(struct hisi_qm *qm)
 {
 	struct device *dev = &qm->pdev->dev;
-	int ret, total_func, i;
+	int ret, total_func;
 	size_t off = 0;
 
-	total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
-	qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
-	if (!qm->factor)
-		return -ENOMEM;
-	for (i = 0; i < total_func; i++)
-		qm->factor[i].func_qos = QM_QOS_MAX_VAL;
+	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
+		total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
+		qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
+		if (!qm->factor)
+			return -ENOMEM;
+
+		/* Only the PF value needs to be initialized */
+		qm->factor[0].func_qos = QM_QOS_MAX_VAL;
+	}
 
 #define QM_INIT_BUF(qm, type, num) do { \
 	(qm)->type = ((qm)->qdma.va + (off)); \
@@ -6133,20 +6325,21 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
 } while (0)
 
 	idr_init(&qm->qp_idr);
-	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
-			QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
+	qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP);
+	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) +
+			QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) +
 			QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
 			QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
 	qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
 					 GFP_ATOMIC);
 	dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
 	if (!qm->qdma.va) {
 		ret = -ENOMEM;
-		goto err_alloc_qdma;
+		goto err_destroy_idr;
 	}
 
-	QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
-	QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
+	QM_INIT_BUF(qm, eqe, qm->eq_depth);
+	QM_INIT_BUF(qm, aeqe, qm->aeq_depth);
 	QM_INIT_BUF(qm, sqc, qm->qp_num);
 	QM_INIT_BUF(qm, cqc, qm->qp_num);
 
@@ -6158,8 +6351,10 @@ err_alloc_qp_array:
 	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
-err_alloc_qdma:
-	kfree(qm->factor);
+err_destroy_idr:
+	idr_destroy(&qm->qp_idr);
+	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+		kfree(qm->factor);
 
 	return ret;
 }
@@ -6202,17 +6397,10 @@ int hisi_qm_init(struct hisi_qm *qm)
 	if (ret)
 		return ret;
 
-	ret = qm_irq_register(qm);
+	ret = qm_irqs_register(qm);
 	if (ret)
 		goto err_pci_init;
 
-	if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
-		/* v2 starts to support get vft by mailbox */
-		ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
-		if (ret)
-			goto err_irq_register;
-	}
-
 	if (qm->fun_type == QM_HW_PF) {
 		qm_disable_clock_gate(qm);
 		ret = qm_dev_mem_reset(qm);
@@ -6251,7 +6439,7 @@ err_alloc_uacce:
 		qm->uacce = NULL;
 	}
 err_irq_register:
-	qm_irq_unregister(qm);
+	qm_irqs_unregister(qm);
 err_pci_init:
 	hisi_qm_pci_uninit(qm);
 	return ret;
@@ -6302,7 +6490,7 @@ void hisi_qm_pm_init(struct hisi_qm *qm)
 {
 	struct device *dev = &qm->pdev->dev;
 
-	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
 		return;
 
 	pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
@@ -6321,7 +6509,7 @@ void hisi_qm_pm_uninit(struct hisi_qm *qm)
 {
 	struct device *dev = &qm->pdev->dev;
 
-	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
 		return;
 
 	pm_runtime_get_noresume(dev);
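
The core idea of this patch is the table-driven capability lookup: each row of a capability table carries a register offset, a field shift and mask, and fixed fallback values for v1/v2 hardware, so hisi_qm_get_hw_info() can either return the per-version constant or decode the field straight from a v3 register. The following is a standalone, simplified sketch of that pattern in plain C, not the kernel source: cap_info, get_hw_info() and mmio_read() are illustrative stand-ins for hisi_qm_cap_info, hisi_qm_get_hw_info() and readl(), and the register value is canned for the demo.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cap_info {
	uint32_t offset;	/* register offset in the device BAR */
	uint32_t shift;		/* field position within the register */
	uint32_t mask;		/* field mask, already shifted down to bit 0 */
	uint32_t v1_val;	/* fixed value used on v1 hardware */
	uint32_t v2_val;	/* fixed value used on v2 hardware */
	uint32_t v3_val;	/* v3 default when register reads are unsupported */
};

/* Stand-in for readl() on the device BAR; returns a canned value here. */
static uint32_t mmio_read(uint32_t offset)
{
	(void)offset;
	return 0x4000800;	/* pretend content of the XEQ depth register */
}

/* Mirrors the shape of hisi_qm_get_hw_info(): fixed values for old
 * hardware versions, a masked register field for v3 when reads work.
 */
static uint32_t get_hw_info(int hw_ver, const struct cap_info *table,
			    unsigned int index, bool is_read)
{
	switch (hw_ver) {
	case 1:
		return table[index].v1_val;
	case 2:
		return table[index].v2_val;
	default:
		if (!is_read)
			return table[index].v3_val;
		return (mmio_read(table[index].offset) >> table[index].shift) &
		       table[index].mask;
	}
}

int main(void)
{
	/* One row shaped like QM_XEQ_DEPTH_CAP in qm_basic_info. */
	static const struct cap_info tbl[] = {
		{ 0x3104, 0, 0xffff, 0x800, 0x4000800, 0x4000800 },
	};
	uint32_t depth = get_hw_info(3, tbl, 0, true);

	/* Same decode as qm_get_xqc_depth(): one queue depth in the low
	 * 16 bits, the other in the high 16 bits of one packed register.
	 */
	printf("low=%u high=%u\n", depth & 0xffff, (depth >> 16) & 0xffff);
	return 0;
}

With the v3 QM_XEQ_DEPTH_CAP value 0x4000800 from qm_basic_info, this decode yields 0x800 (2048, the old QM_EQ_DEPTH) in the low half and 0x400 (1024, the old QM_Q_DEPTH) in the high half, which is what qm_get_xqc_depth() stores into qm->eq_depth and qm->aeq_depth.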