Diffstat (limited to 'drivers/net/ethernet/huawei')
40 files changed, 7615 insertions, 112 deletions
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c index 03e42512a2d5..300bc267a259 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c @@ -443,8 +443,9 @@ int hinic_health_reporters_create(struct hinic_devlink_priv *priv) struct devlink *devlink = priv_to_devlink(priv); priv->hw_fault_reporter = - devlink_health_reporter_create(devlink, &hinic_hw_fault_reporter_ops, - 0, priv); + devlink_health_reporter_create(devlink, + &hinic_hw_fault_reporter_ops, + priv); if (IS_ERR(priv->hw_fault_reporter)) { dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create hw fault reporter, err: %ld\n", PTR_ERR(priv->hw_fault_reporter)); @@ -452,8 +453,9 @@ int hinic_health_reporters_create(struct hinic_devlink_priv *priv) } priv->fw_fault_reporter = - devlink_health_reporter_create(devlink, &hinic_fw_fault_reporter_ops, - 0, priv); + devlink_health_reporter_create(devlink, + &hinic_fw_fault_reporter_ops, + priv); if (IS_ERR(priv->fw_fault_reporter)) { dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create fw fault reporter, err: %ld\n", PTR_ERR(priv->fw_fault_reporter)); diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile index 509dfbfb0e96..c3efa45a6a42 100644 --- a/drivers/net/ethernet/huawei/hinic3/Makefile +++ b/drivers/net/ethernet/huawei/hinic3/Makefile @@ -3,7 +3,9 @@ obj-$(CONFIG_HINIC3) += hinic3.o -hinic3-objs := hinic3_common.o \ +hinic3-objs := hinic3_cmdq.o \ + hinic3_common.o \ + hinic3_eqs.o \ hinic3_hw_cfg.o \ hinic3_hw_comm.o \ hinic3_hwdev.o \ @@ -12,10 +14,12 @@ hinic3-objs := hinic3_common.o \ hinic3_lld.o \ hinic3_main.o \ hinic3_mbox.o \ + hinic3_mgmt.o \ hinic3_netdev_ops.o \ hinic3_nic_cfg.o \ hinic3_nic_io.o \ hinic3_queue_common.o \ + hinic3_rss.o \ hinic3_rx.o \ hinic3_tx.o \ hinic3_wq.o diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c new file mode 100644 index 000000000000..ef539d1b69a3 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c @@ -0,0 +1,915 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
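The hinic_devlink.c hunk above is purely mechanical: devlink_health_reporter_create() has dropped its graceful_period argument (previously passed as 0 here), so callers now pass only the devlink instance, the ops table, and the priv pointer. A minimal sketch of the updated call pattern follows; apart from the devlink core API visible in the hunk, every name in it is an illustrative placeholder, not a symbol from this patch.

#include <linux/err.h>
#include <linux/printk.h>
#include <net/devlink.h>

/* Sketch: registering a health reporter with the 3-argument
 * devlink_health_reporter_create() shown in the hunk above.
 * example_ops/example_priv are hypothetical placeholders.
 */
static struct devlink_health_reporter *
example_reporter_create(struct devlink *devlink,
			const struct devlink_health_reporter_ops *example_ops,
			void *example_priv)
{
	struct devlink_health_reporter *reporter;

	/* old form: devlink_health_reporter_create(devlink, ops, 0, priv) */
	reporter = devlink_health_reporter_create(devlink, example_ops,
						  example_priv);
	if (IS_ERR(reporter))
		pr_warn("failed to create reporter: %ld\n",
			PTR_ERR(reporter));

	return reporter;
}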
+ +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> + +#include "hinic3_cmdq.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" + +#define CMDQ_BUF_SIZE 2048 +#define CMDQ_WQEBB_SIZE 64 + +#define CMDQ_CMD_TIMEOUT 5000 +#define CMDQ_ENABLE_WAIT_TIMEOUT 300 + +#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK GENMASK_ULL(51, 0) +#define CMDQ_CTXT_EQ_ID_MASK GENMASK_ULL(60, 53) +#define CMDQ_CTXT_CEQ_ARM_MASK BIT_ULL(61) +#define CMDQ_CTXT_CEQ_EN_MASK BIT_ULL(62) +#define CMDQ_CTXT_HW_BUSY_BIT_MASK BIT_ULL(63) + +#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK GENMASK_ULL(51, 0) +#define CMDQ_CTXT_CI_MASK GENMASK_ULL(63, 52) +#define CMDQ_CTXT_SET(val, member) \ + FIELD_PREP(CMDQ_CTXT_##member##_MASK, val) + +#define CMDQ_WQE_HDR_BUFDESC_LEN_MASK GENMASK(7, 0) +#define CMDQ_WQE_HDR_COMPLETE_FMT_MASK BIT(15) +#define CMDQ_WQE_HDR_DATA_FMT_MASK BIT(22) +#define CMDQ_WQE_HDR_COMPLETE_REQ_MASK BIT(23) +#define CMDQ_WQE_HDR_COMPLETE_SECT_LEN_MASK GENMASK(28, 27) +#define CMDQ_WQE_HDR_CTRL_LEN_MASK GENMASK(30, 29) +#define CMDQ_WQE_HDR_HW_BUSY_BIT_MASK BIT(31) +#define CMDQ_WQE_HDR_SET(val, member) \ + FIELD_PREP(CMDQ_WQE_HDR_##member##_MASK, val) +#define CMDQ_WQE_HDR_GET(val, member) \ + FIELD_GET(CMDQ_WQE_HDR_##member##_MASK, le32_to_cpu(val)) + +#define CMDQ_CTRL_PI_MASK GENMASK(15, 0) +#define CMDQ_CTRL_CMD_MASK GENMASK(23, 16) +#define CMDQ_CTRL_MOD_MASK GENMASK(28, 24) +#define CMDQ_CTRL_HW_BUSY_BIT_MASK BIT(31) +#define CMDQ_CTRL_SET(val, member) \ + FIELD_PREP(CMDQ_CTRL_##member##_MASK, val) +#define CMDQ_CTRL_GET(val, member) \ + FIELD_GET(CMDQ_CTRL_##member##_MASK, val) + +#define CMDQ_WQE_ERRCODE_VAL_MASK GENMASK(30, 0) +#define CMDQ_WQE_ERRCODE_GET(val, member) \ + FIELD_GET(CMDQ_WQE_ERRCODE_##member##_MASK, le32_to_cpu(val)) + +#define CMDQ_DB_INFO_HI_PROD_IDX_MASK GENMASK(7, 0) +#define CMDQ_DB_INFO_SET(val, member) \ + FIELD_PREP(CMDQ_DB_INFO_##member##_MASK, val) + +#define CMDQ_DB_HEAD_QUEUE_TYPE_MASK BIT(23) +#define CMDQ_DB_HEAD_CMDQ_TYPE_MASK GENMASK(26, 24) +#define CMDQ_DB_HEAD_SET(val, member) \ + FIELD_PREP(CMDQ_DB_HEAD_##member##_MASK, val) + +#define CMDQ_CEQE_TYPE_MASK GENMASK(2, 0) +#define CMDQ_CEQE_GET(val, member) \ + FIELD_GET(CMDQ_CEQE_##member##_MASK, le32_to_cpu(val)) + +#define CMDQ_WQE_HEADER(wqe) ((struct cmdq_header *)(wqe)) +#define CMDQ_WQE_COMPLETED(ctrl_info) \ + CMDQ_CTRL_GET(le32_to_cpu(ctrl_info), HW_BUSY_BIT) + +#define CMDQ_PFN(addr) ((addr) >> 12) + +/* cmdq work queue's chip logical address table is up to 512B */ +#define CMDQ_WQ_CLA_SIZE 512 + +/* Completion codes: send, direct sync, force stop */ +#define CMDQ_SEND_CMPT_CODE 10 +#define CMDQ_DIRECT_SYNC_CMPT_CODE 11 +#define CMDQ_FORCE_STOP_CMPT_CODE 12 + +enum cmdq_data_format { + CMDQ_DATA_SGE = 0, + CMDQ_DATA_DIRECT = 1, +}; + +enum cmdq_ctrl_sect_len { + CMDQ_CTRL_SECT_LEN = 1, + CMDQ_CTRL_DIRECT_SECT_LEN = 2, +}; + +enum cmdq_bufdesc_len { + CMDQ_BUFDESC_LCMD_LEN = 2, + CMDQ_BUFDESC_SCMD_LEN = 3, +}; + +enum cmdq_completion_format { + CMDQ_COMPLETE_DIRECT = 0, + CMDQ_COMPLETE_SGE = 1, +}; + +enum cmdq_cmd_type { + CMDQ_CMD_DIRECT_RESP, + CMDQ_CMD_SGE_RESP, +}; + +#define CMDQ_WQE_NUM_WQEBBS 1 + +static struct cmdq_wqe *cmdq_read_wqe(struct hinic3_wq *wq, u16 *ci) +{ + if (hinic3_wq_get_used(wq) == 0) + return NULL; + + *ci = wq->cons_idx & wq->idx_mask; + + return get_q_element(&wq->qpages, wq->cons_idx, NULL); +} + +struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cmd_buf *cmd_buf; + struct 
hinic3_cmdqs *cmdqs; + + cmdqs = hwdev->cmdqs; + + cmd_buf = kmalloc(sizeof(*cmd_buf), GFP_ATOMIC); + if (!cmd_buf) + return NULL; + + cmd_buf->buf = dma_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC, + &cmd_buf->dma_addr); + if (!cmd_buf->buf) { + dev_err(hwdev->dev, "Failed to allocate cmdq cmd buf from the pool\n"); + goto err_free_cmd_buf; + } + + cmd_buf->size = cpu_to_le16(CMDQ_BUF_SIZE); + refcount_set(&cmd_buf->ref_cnt, 1); + + return cmd_buf; + +err_free_cmd_buf: + kfree(cmd_buf); + + return NULL; +} + +void hinic3_free_cmd_buf(struct hinic3_hwdev *hwdev, + struct hinic3_cmd_buf *cmd_buf) +{ + struct hinic3_cmdqs *cmdqs; + + if (!refcount_dec_and_test(&cmd_buf->ref_cnt)) + return; + + cmdqs = hwdev->cmdqs; + + dma_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr); + kfree(cmd_buf); +} + +static void cmdq_clear_cmd_buf(struct hinic3_cmdq_cmd_info *cmd_info, + struct hinic3_hwdev *hwdev) +{ + if (cmd_info->buf_in) { + hinic3_free_cmd_buf(hwdev, cmd_info->buf_in); + cmd_info->buf_in = NULL; + } +} + +static void clear_wqe_complete_bit(struct hinic3_cmdq *cmdq, + struct cmdq_wqe *wqe, u16 ci) +{ + struct cmdq_header *hdr = CMDQ_WQE_HEADER(wqe); + __le32 header_info = hdr->header_info; + enum cmdq_data_format df; + struct cmdq_ctrl *ctrl; + + df = CMDQ_WQE_HDR_GET(header_info, DATA_FMT); + if (df == CMDQ_DATA_SGE) + ctrl = &wqe->wqe_lcmd.ctrl; + else + ctrl = &wqe->wqe_scmd.ctrl; + + /* clear HW busy bit */ + ctrl->ctrl_info = 0; + cmdq->cmd_infos[ci].cmd_type = HINIC3_CMD_TYPE_NONE; + wmb(); /* verify wqe is clear before updating ci */ + hinic3_wq_put_wqebbs(&cmdq->wq, CMDQ_WQE_NUM_WQEBBS); +} + +static void cmdq_update_cmd_status(struct hinic3_cmdq *cmdq, u16 prod_idx, + struct cmdq_wqe *wqe) +{ + struct hinic3_cmdq_cmd_info *cmd_info; + struct cmdq_wqe_lcmd *wqe_lcmd; + __le32 status_info; + + wqe_lcmd = &wqe->wqe_lcmd; + cmd_info = &cmdq->cmd_infos[prod_idx]; + if (cmd_info->errcode) { + status_info = wqe_lcmd->status.status_info; + *cmd_info->errcode = CMDQ_WQE_ERRCODE_GET(status_info, VAL); + } + + if (cmd_info->direct_resp) + *cmd_info->direct_resp = wqe_lcmd->completion.resp.direct.val; +} + +static void cmdq_sync_cmd_handler(struct hinic3_cmdq *cmdq, + struct cmdq_wqe *wqe, u16 ci) +{ + spin_lock(&cmdq->cmdq_lock); + cmdq_update_cmd_status(cmdq, ci, wqe); + if (cmdq->cmd_infos[ci].cmpt_code) { + *cmdq->cmd_infos[ci].cmpt_code = CMDQ_DIRECT_SYNC_CMPT_CODE; + cmdq->cmd_infos[ci].cmpt_code = NULL; + } + + /* Ensure that completion code has been updated before updating done */ + smp_wmb(); + if (cmdq->cmd_infos[ci].done) { + complete(cmdq->cmd_infos[ci].done); + cmdq->cmd_infos[ci].done = NULL; + } + spin_unlock(&cmdq->cmdq_lock); + + cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], cmdq->hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); +} + +void hinic3_cmdq_ceq_handler(struct hinic3_hwdev *hwdev, __le32 ceqe_data) +{ + enum hinic3_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE); + struct hinic3_cmdqs *cmdqs = hwdev->cmdqs; + struct hinic3_cmdq_cmd_info *cmd_info; + struct cmdq_wqe_lcmd *wqe_lcmd; + struct hinic3_cmdq *cmdq; + struct cmdq_wqe *wqe; + __le32 ctrl_info; + u16 ci; + + if (unlikely(cmdq_type >= ARRAY_SIZE(cmdqs->cmdq))) + return; + + cmdq = &cmdqs->cmdq[cmdq_type]; + while ((wqe = cmdq_read_wqe(&cmdq->wq, &ci)) != NULL) { + cmd_info = &cmdq->cmd_infos[ci]; + switch (cmd_info->cmd_type) { + case HINIC3_CMD_TYPE_NONE: + return; + case HINIC3_CMD_TYPE_TIMEOUT: + dev_warn(hwdev->dev, "Cmdq timeout, q_id: %u, ci: %u\n", + cmdq_type, ci); + fallthrough; + case 
HINIC3_CMD_TYPE_FAKE_TIMEOUT: + cmdq_clear_cmd_buf(cmd_info, hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); + break; + default: + /* only arm bit is using scmd wqe, + * the other wqe is lcmd + */ + wqe_lcmd = &wqe->wqe_lcmd; + ctrl_info = wqe_lcmd->ctrl.ctrl_info; + if (!CMDQ_WQE_COMPLETED(ctrl_info)) + return; + + dma_rmb(); + /* For FORCE_STOP cmd_type, we also need to wait for + * the firmware processing to complete to prevent the + * firmware from accessing the released cmd_buf + */ + if (cmd_info->cmd_type == HINIC3_CMD_TYPE_FORCE_STOP) { + cmdq_clear_cmd_buf(cmd_info, hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); + } else { + cmdq_sync_cmd_handler(cmdq, wqe, ci); + } + + break; + } + } +} + +static int wait_cmdqs_enable(struct hinic3_cmdqs *cmdqs) +{ + unsigned long end; + + end = jiffies + msecs_to_jiffies(CMDQ_ENABLE_WAIT_TIMEOUT); + do { + if (cmdqs->status & HINIC3_CMDQ_ENABLE) + return 0; + usleep_range(1000, 2000); + } while (time_before(jiffies, end) && !cmdqs->disable_flag); + + cmdqs->disable_flag = 1; + + return -EBUSY; +} + +static void cmdq_set_completion(struct cmdq_completion *complete, + struct hinic3_cmd_buf *buf_out) +{ + struct hinic3_sge *sge = &complete->resp.sge; + + hinic3_set_sge(sge, buf_out->dma_addr, cpu_to_le32(CMDQ_BUF_SIZE)); +} + +static struct cmdq_wqe *cmdq_get_wqe(struct hinic3_wq *wq, u16 *pi) +{ + if (!hinic3_wq_free_wqebbs(wq)) + return NULL; + + return hinic3_wq_get_one_wqebb(wq, pi); +} + +static void cmdq_set_lcmd_bufdesc(struct cmdq_wqe_lcmd *wqe, + struct hinic3_cmd_buf *buf_in) +{ + hinic3_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, + (__force __le32)buf_in->size); +} + +static void cmdq_set_db(struct hinic3_cmdq *cmdq, + enum hinic3_cmdq_type cmdq_type, u16 prod_idx) +{ + u8 __iomem *db_base = cmdq->hwdev->cmdqs->cmdqs_db_base; + u16 db_ofs = (prod_idx & 0xFF) << 3; + struct cmdq_db db; + + db.db_info = cpu_to_le32(CMDQ_DB_INFO_SET(prod_idx >> 8, HI_PROD_IDX)); + db.db_head = cpu_to_le32(CMDQ_DB_HEAD_SET(1, QUEUE_TYPE) | + CMDQ_DB_HEAD_SET(cmdq_type, CMDQ_TYPE)); + writeq(*(u64 *)&db, db_base + db_ofs); +} + +static void cmdq_wqe_fill(struct cmdq_wqe *hw_wqe, + const struct cmdq_wqe *shadow_wqe) +{ + const struct cmdq_header *src = (struct cmdq_header *)shadow_wqe; + struct cmdq_header *dst = (struct cmdq_header *)hw_wqe; + size_t len; + + len = sizeof(struct cmdq_wqe) - sizeof(struct cmdq_header); + memcpy(dst + 1, src + 1, len); + /* Ensure buffer len before updating header */ + wmb(); + WRITE_ONCE(*dst, *src); +} + +static void cmdq_prepare_wqe_ctrl(struct cmdq_wqe *wqe, u8 wrapped, + u8 mod, u8 cmd, u16 prod_idx, + enum cmdq_completion_format complete_format, + enum cmdq_data_format data_format, + enum cmdq_bufdesc_len buf_len) +{ + struct cmdq_header *hdr = CMDQ_WQE_HEADER(wqe); + enum cmdq_ctrl_sect_len ctrl_len; + struct cmdq_wqe_lcmd *wqe_lcmd; + struct cmdq_wqe_scmd *wqe_scmd; + struct cmdq_ctrl *ctrl; + + if (data_format == CMDQ_DATA_SGE) { + wqe_lcmd = &wqe->wqe_lcmd; + wqe_lcmd->status.status_info = 0; + ctrl = &wqe_lcmd->ctrl; + ctrl_len = CMDQ_CTRL_SECT_LEN; + } else { + wqe_scmd = &wqe->wqe_scmd; + wqe_scmd->status.status_info = 0; + ctrl = &wqe_scmd->ctrl; + ctrl_len = CMDQ_CTRL_DIRECT_SECT_LEN; + } + + ctrl->ctrl_info = + cpu_to_le32(CMDQ_CTRL_SET(prod_idx, PI) | + CMDQ_CTRL_SET(cmd, CMD) | + CMDQ_CTRL_SET(mod, MOD)); + + hdr->header_info = + cpu_to_le32(CMDQ_WQE_HDR_SET(buf_len, BUFDESC_LEN) | + CMDQ_WQE_HDR_SET(complete_format, COMPLETE_FMT) | + CMDQ_WQE_HDR_SET(data_format, DATA_FMT) | + CMDQ_WQE_HDR_SET(1, 
COMPLETE_REQ) | + CMDQ_WQE_HDR_SET(3, COMPLETE_SECT_LEN) | + CMDQ_WQE_HDR_SET(ctrl_len, CTRL_LEN) | + CMDQ_WQE_HDR_SET(wrapped, HW_BUSY_BIT)); +} + +static void cmdq_set_lcmd_wqe(struct cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + struct hinic3_cmd_buf *buf_in, + struct hinic3_cmd_buf *buf_out, + u8 wrapped, u8 mod, u8 cmd, u16 prod_idx) +{ + enum cmdq_completion_format complete_format = CMDQ_COMPLETE_DIRECT; + struct cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + + switch (cmd_type) { + case CMDQ_CMD_DIRECT_RESP: + wqe_lcmd->completion.resp.direct.val = 0; + break; + case CMDQ_CMD_SGE_RESP: + if (buf_out) { + complete_format = CMDQ_COMPLETE_SGE; + cmdq_set_completion(&wqe_lcmd->completion, buf_out); + } + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, complete_format, + CMDQ_DATA_SGE, CMDQ_BUFDESC_LCMD_LEN); + cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); +} + +static int hinic3_cmdq_sync_timeout_check(struct hinic3_cmdq *cmdq, + struct cmdq_wqe *wqe, u16 pi) +{ + struct cmdq_wqe_lcmd *wqe_lcmd; + struct cmdq_ctrl *ctrl; + __le32 ctrl_info; + + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = ctrl->ctrl_info; + if (!CMDQ_WQE_COMPLETED(ctrl_info)) { + dev_dbg(cmdq->hwdev->dev, "Cmdq sync command check busy bit not set\n"); + return -EFAULT; + } + cmdq_update_cmd_status(cmdq, pi, wqe); + + return 0; +} + +static void clear_cmd_info(struct hinic3_cmdq_cmd_info *cmd_info, + const struct hinic3_cmdq_cmd_info *saved_cmd_info) +{ + if (cmd_info->errcode == saved_cmd_info->errcode) + cmd_info->errcode = NULL; + + if (cmd_info->done == saved_cmd_info->done) + cmd_info->done = NULL; + + if (cmd_info->direct_resp == saved_cmd_info->direct_resp) + cmd_info->direct_resp = NULL; +} + +static int wait_cmdq_sync_cmd_completion(struct hinic3_cmdq *cmdq, + struct hinic3_cmdq_cmd_info *cmd_info, + struct hinic3_cmdq_cmd_info *saved_cmd_info, + u64 curr_msg_id, u16 curr_prod_idx, + struct cmdq_wqe *curr_wqe, + u32 timeout) +{ + ulong timeo = msecs_to_jiffies(timeout); + int err; + + if (wait_for_completion_timeout(saved_cmd_info->done, timeo)) + return 0; + + spin_lock_bh(&cmdq->cmdq_lock); + if (cmd_info->cmpt_code == saved_cmd_info->cmpt_code) + cmd_info->cmpt_code = NULL; + + if (*saved_cmd_info->cmpt_code == CMDQ_DIRECT_SYNC_CMPT_CODE) { + dev_dbg(cmdq->hwdev->dev, "Cmdq direct sync command has been completed\n"); + spin_unlock_bh(&cmdq->cmdq_lock); + return 0; + } + + if (curr_msg_id == cmd_info->cmdq_msg_id) { + err = hinic3_cmdq_sync_timeout_check(cmdq, curr_wqe, + curr_prod_idx); + if (err) + cmd_info->cmd_type = HINIC3_CMD_TYPE_TIMEOUT; + else + cmd_info->cmd_type = HINIC3_CMD_TYPE_FAKE_TIMEOUT; + } else { + err = -ETIMEDOUT; + dev_err(cmdq->hwdev->dev, + "Cmdq sync command current msg id mismatch cmd_info msg id\n"); + } + + clear_cmd_info(cmd_info, saved_cmd_info); + spin_unlock_bh(&cmdq->cmdq_lock); + + return err; +} + +static int cmdq_sync_cmd_direct_resp(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd, + struct hinic3_cmd_buf *buf_in, + __le64 *out_param) +{ + struct hinic3_cmdq_cmd_info *cmd_info, saved_cmd_info; + int cmpt_code = CMDQ_SEND_CMPT_CODE; + struct cmdq_wqe *curr_wqe, wqe = {}; + struct hinic3_wq *wq = &cmdq->wq; + u16 curr_prod_idx, next_prod_idx; + struct completion done; + u64 curr_msg_id; + int errcode; + u8 wrapped; + int err; + + spin_lock_bh(&cmdq->cmdq_lock); + curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); + if (!curr_wqe) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EBUSY; + } + + wrapped = cmdq->wrapped; + next_prod_idx = curr_prod_idx + 
CMDQ_WQE_NUM_WQEBBS; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped ^= 1; + next_prod_idx -= wq->q_depth; + } + + cmd_info = &cmdq->cmd_infos[curr_prod_idx]; + init_completion(&done); + refcount_inc(&buf_in->ref_cnt); + cmd_info->cmd_type = HINIC3_CMD_TYPE_DIRECT_RESP; + cmd_info->done = &done; + cmd_info->errcode = &errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = &cmpt_code; + cmd_info->buf_in = buf_in; + saved_cmd_info = *cmd_info; + cmdq_set_lcmd_wqe(&wqe, CMDQ_CMD_DIRECT_RESP, buf_in, NULL, + wrapped, mod, cmd, curr_prod_idx); + + cmdq_wqe_fill(curr_wqe, &wqe); + (cmd_info->cmdq_msg_id)++; + curr_msg_id = cmd_info->cmdq_msg_id; + cmdq_set_db(cmdq, HINIC3_CMDQ_SYNC, next_prod_idx); + spin_unlock_bh(&cmdq->cmdq_lock); + + err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info, + curr_msg_id, curr_prod_idx, + curr_wqe, CMDQ_CMD_TIMEOUT); + if (err) { + dev_err(cmdq->hwdev->dev, + "Cmdq sync command timeout, mod: %u, cmd: %u, prod idx: 0x%x\n", + mod, cmd, curr_prod_idx); + err = -ETIMEDOUT; + } + + if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) { + dev_dbg(cmdq->hwdev->dev, + "Force stop cmdq cmd, mod: %u, cmd: %u\n", mod, cmd); + err = -EAGAIN; + } + + smp_rmb(); /* read error code after completion */ + + return err ? err : errcode; +} + +int hinic3_cmdq_direct_resp(struct hinic3_hwdev *hwdev, u8 mod, u8 cmd, + struct hinic3_cmd_buf *buf_in, __le64 *out_param) +{ + struct hinic3_cmdqs *cmdqs; + int err; + + cmdqs = hwdev->cmdqs; + err = wait_cmdqs_enable(cmdqs); + if (err) { + dev_err(hwdev->dev, "Cmdq is disabled\n"); + return err; + } + + err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC3_CMDQ_SYNC], + mod, cmd, buf_in, out_param); + + return err; +} + +static void cmdq_init_queue_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id, + struct comm_cmdq_ctxt_info *ctxt_info) +{ + const struct hinic3_cmdqs *cmdqs; + u64 cmdq_first_block_paddr, pfn; + const struct hinic3_wq *wq; + + cmdqs = hwdev->cmdqs; + wq = &cmdqs->cmdq[cmdq_id].wq; + pfn = CMDQ_PFN(hinic3_wq_get_first_wqe_page_addr(wq)); + + ctxt_info->curr_wqe_page_pfn = + cpu_to_le64(CMDQ_CTXT_SET(1, HW_BUSY_BIT) | + CMDQ_CTXT_SET(1, CEQ_EN) | + CMDQ_CTXT_SET(1, CEQ_ARM) | + CMDQ_CTXT_SET(0, EQ_ID) | + CMDQ_CTXT_SET(pfn, CURR_WQE_PAGE_PFN)); + + if (!hinic3_wq_is_0_level_cla(wq)) { + cmdq_first_block_paddr = cmdqs->wq_block_paddr; + pfn = CMDQ_PFN(cmdq_first_block_paddr); + } + + ctxt_info->wq_block_pfn = cpu_to_le64(CMDQ_CTXT_SET(wq->cons_idx, CI) | + CMDQ_CTXT_SET(pfn, WQ_BLOCK_PFN)); +} + +static int init_cmdq(struct hinic3_cmdq *cmdq, struct hinic3_hwdev *hwdev, + enum hinic3_cmdq_type q_type) +{ + int err; + + cmdq->cmdq_type = q_type; + cmdq->wrapped = 1; + cmdq->hwdev = hwdev; + + spin_lock_init(&cmdq->cmdq_lock); + + cmdq->cmd_infos = kcalloc(cmdq->wq.q_depth, sizeof(*cmdq->cmd_infos), + GFP_KERNEL); + if (!cmdq->cmd_infos) { + err = -ENOMEM; + return err; + } + + return 0; +} + +static int hinic3_set_cmdq_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id) +{ + struct comm_cmd_set_cmdq_ctxt cmdq_ctxt = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + cmdq_init_queue_ctxt(hwdev, cmdq_id, &cmdq_ctxt.ctxt); + cmdq_ctxt.func_id = hinic3_global_func_id(hwdev); + cmdq_ctxt.cmdq_id = cmdq_id; + + mgmt_msg_params_init_default(&msg_params, &cmdq_ctxt, + sizeof(cmdq_ctxt)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, + COMM_CMD_SET_CMDQ_CTXT, &msg_params); + if (err || cmdq_ctxt.head.status) { + dev_err(hwdev->dev, "Failed to set cmdq ctxt, err: %d, status: 0x%x\n", + err, 
cmdq_ctxt.head.status); + return -EFAULT; + } + + return 0; +} + +static int hinic3_set_cmdq_ctxts(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cmdqs *cmdqs = hwdev->cmdqs; + u8 cmdq_type; + int err; + + for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + err = hinic3_set_cmdq_ctxt(hwdev, cmdq_type); + if (err) + return err; + } + + cmdqs->status |= HINIC3_CMDQ_ENABLE; + cmdqs->disable_flag = 0; + + return 0; +} + +static int create_cmdq_wq(struct hinic3_hwdev *hwdev, + struct hinic3_cmdqs *cmdqs) +{ + u8 cmdq_type; + int err; + + for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + err = hinic3_wq_create(hwdev, &cmdqs->cmdq[cmdq_type].wq, + CMDQ_DEPTH, CMDQ_WQEBB_SIZE); + if (err) { + dev_err(hwdev->dev, "Failed to create cmdq wq\n"); + goto err_destroy_wq; + } + } + + /* 1-level Chip Logical Address (CLA) must put all + * cmdq's wq page addr in one wq block + */ + if (!hinic3_wq_is_0_level_cla(&cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq)) { + if (cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq.qpages.num_pages > + CMDQ_WQ_CLA_SIZE / sizeof(u64)) { + err = -EINVAL; + dev_err(hwdev->dev, + "Cmdq number of wq pages exceeds limit: %lu\n", + CMDQ_WQ_CLA_SIZE / sizeof(u64)); + goto err_destroy_wq; + } + + cmdqs->wq_block_vaddr = + dma_alloc_coherent(hwdev->dev, HINIC3_MIN_PAGE_SIZE, + &cmdqs->wq_block_paddr, GFP_KERNEL); + if (!cmdqs->wq_block_vaddr) { + err = -ENOMEM; + goto err_destroy_wq; + } + + for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) + memcpy((u8 *)cmdqs->wq_block_vaddr + + CMDQ_WQ_CLA_SIZE * cmdq_type, + cmdqs->cmdq[cmdq_type].wq.wq_block_vaddr, + cmdqs->cmdq[cmdq_type].wq.qpages.num_pages * + sizeof(__be64)); + } + + return 0; + +err_destroy_wq: + while (cmdq_type > 0) { + cmdq_type--; + hinic3_wq_destroy(hwdev, &cmdqs->cmdq[cmdq_type].wq); + } + + return err; +} + +static void destroy_cmdq_wq(struct hinic3_hwdev *hwdev, + struct hinic3_cmdqs *cmdqs) +{ + u8 cmdq_type; + + if (cmdqs->wq_block_vaddr) + dma_free_coherent(hwdev->dev, HINIC3_MIN_PAGE_SIZE, + cmdqs->wq_block_vaddr, cmdqs->wq_block_paddr); + + for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) + hinic3_wq_destroy(hwdev, &cmdqs->cmdq[cmdq_type].wq); +} + +static int init_cmdqs(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cmdqs *cmdqs; + + cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL); + if (!cmdqs) + return -ENOMEM; + + hwdev->cmdqs = cmdqs; + cmdqs->hwdev = hwdev; + cmdqs->cmdq_num = hwdev->max_cmdq; + + cmdqs->cmd_buf_pool = dma_pool_create("hinic3_cmdq", hwdev->dev, + CMDQ_BUF_SIZE, CMDQ_BUF_SIZE, 0); + if (!cmdqs->cmd_buf_pool) { + dev_err(hwdev->dev, "Failed to create cmdq buffer pool\n"); + kfree(cmdqs); + return -ENOMEM; + } + + return 0; +} + +static void cmdq_flush_sync_cmd(struct hinic3_cmdq_cmd_info *cmd_info) +{ + if (cmd_info->cmd_type != HINIC3_CMD_TYPE_DIRECT_RESP) + return; + + cmd_info->cmd_type = HINIC3_CMD_TYPE_FORCE_STOP; + + if (cmd_info->cmpt_code && + *cmd_info->cmpt_code == CMDQ_SEND_CMPT_CODE) + *cmd_info->cmpt_code = CMDQ_FORCE_STOP_CMPT_CODE; + + if (cmd_info->done) { + complete(cmd_info->done); + cmd_info->done = NULL; + cmd_info->cmpt_code = NULL; + cmd_info->direct_resp = NULL; + cmd_info->errcode = NULL; + } +} + +static void hinic3_cmdq_flush_cmd(struct hinic3_cmdq *cmdq) +{ + struct hinic3_cmdq_cmd_info *cmd_info; + u16 ci; + + spin_lock_bh(&cmdq->cmdq_lock); + while (cmdq_read_wqe(&cmdq->wq, &ci)) { + hinic3_wq_put_wqebbs(&cmdq->wq, CMDQ_WQE_NUM_WQEBBS); + cmd_info = &cmdq->cmd_infos[ci]; + if (cmd_info->cmd_type == HINIC3_CMD_TYPE_DIRECT_RESP) + 
cmdq_flush_sync_cmd(cmd_info); + } + spin_unlock_bh(&cmdq->cmdq_lock); +} + +void hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cmdq *cmdq; + u16 wqe_cnt, wqe_idx, i; + struct hinic3_wq *wq; + + cmdq = &hwdev->cmdqs->cmdq[HINIC3_CMDQ_SYNC]; + spin_lock_bh(&cmdq->cmdq_lock); + wq = &cmdq->wq; + wqe_cnt = hinic3_wq_get_used(wq); + for (i = 0; i < wqe_cnt; i++) { + wqe_idx = (wq->cons_idx + i) & wq->idx_mask; + cmdq_flush_sync_cmd(cmdq->cmd_infos + wqe_idx); + } + spin_unlock_bh(&cmdq->cmdq_lock); +} + +static void hinic3_cmdq_reset_all_cmd_buf(struct hinic3_cmdq *cmdq) +{ + u16 i; + + for (i = 0; i < cmdq->wq.q_depth; i++) + cmdq_clear_cmd_buf(&cmdq->cmd_infos[i], cmdq->hwdev); +} + +int hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cmdqs *cmdqs = hwdev->cmdqs; + u8 cmdq_type; + + for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + hinic3_cmdq_flush_cmd(&cmdqs->cmdq[cmdq_type]); + hinic3_cmdq_reset_all_cmd_buf(&cmdqs->cmdq[cmdq_type]); + cmdqs->cmdq[cmdq_type].wrapped = 1; + hinic3_wq_reset(&cmdqs->cmdq[cmdq_type].wq); + } + + return hinic3_set_cmdq_ctxts(hwdev); +} + +int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cmdqs *cmdqs; + void __iomem *db_base; + u8 cmdq_type; + int err; + + err = init_cmdqs(hwdev); + if (err) + goto err_out; + + cmdqs = hwdev->cmdqs; + err = create_cmdq_wq(hwdev, cmdqs); + if (err) + goto err_free_cmdqs; + + err = hinic3_alloc_db_addr(hwdev, &db_base, NULL); + if (err) { + dev_err(hwdev->dev, "Failed to allocate doorbell address\n"); + goto err_destroy_cmdq_wq; + } + cmdqs->cmdqs_db_base = db_base; + + for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, cmdq_type); + if (err) { + dev_err(hwdev->dev, + "Failed to initialize cmdq type : %d\n", + cmdq_type); + goto err_free_cmd_infos; + } + } + + err = hinic3_set_cmdq_ctxts(hwdev); + if (err) + goto err_free_cmd_infos; + + return 0; + +err_free_cmd_infos: + while (cmdq_type > 0) { + cmdq_type--; + kfree(cmdqs->cmdq[cmdq_type].cmd_infos); + } + + hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base); + +err_destroy_cmdq_wq: + destroy_cmdq_wq(hwdev, cmdqs); + +err_free_cmdqs: + dma_pool_destroy(cmdqs->cmd_buf_pool); + kfree(cmdqs); + +err_out: + return err; +} + +void hinic3_cmdqs_free(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cmdqs *cmdqs = hwdev->cmdqs; + u8 cmdq_type; + + cmdqs->status &= ~HINIC3_CMDQ_ENABLE; + + for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + hinic3_cmdq_flush_cmd(&cmdqs->cmdq[cmdq_type]); + hinic3_cmdq_reset_all_cmd_buf(&cmdqs->cmdq[cmdq_type]); + kfree(cmdqs->cmdq[cmdq_type].cmd_infos); + } + + hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base); + destroy_cmdq_wq(hwdev, cmdqs); + dma_pool_destroy(cmdqs->cmd_buf_pool); + kfree(cmdqs); +} + +bool hinic3_cmdq_idle(struct hinic3_cmdq *cmdq) +{ + return hinic3_wq_get_used(&cmdq->wq) == 0; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h new file mode 100644 index 000000000000..f99c386a2780 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_CMDQ_H_ +#define _HINIC3_CMDQ_H_ + +#include <linux/dmapool.h> + +#include "hinic3_hw_intf.h" +#include "hinic3_wq.h" + +#define CMDQ_DEPTH 4096 + +struct cmdq_db { + __le32 db_head; + __le32 db_info; +}; + +/* hw defined cmdq wqe header */ +struct cmdq_header { + __le32 header_info; + __le32 saved_data; +}; + +struct cmdq_lcmd_bufdesc { + struct hinic3_sge sge; + __le64 rsvd2; + __le64 rsvd3; +}; + +struct cmdq_status { + __le32 status_info; +}; + +struct cmdq_ctrl { + __le32 ctrl_info; +}; + +struct cmdq_direct_resp { + __le64 val; + __le64 rsvd; +}; + +struct cmdq_completion { + union { + struct hinic3_sge sge; + struct cmdq_direct_resp direct; + } resp; +}; + +struct cmdq_wqe_scmd { + struct cmdq_header header; + __le64 rsvd3; + struct cmdq_status status; + struct cmdq_ctrl ctrl; + struct cmdq_completion completion; + __le32 rsvd10[6]; +}; + +struct cmdq_wqe_lcmd { + struct cmdq_header header; + struct cmdq_status status; + struct cmdq_ctrl ctrl; + struct cmdq_completion completion; + struct cmdq_lcmd_bufdesc buf_desc; +}; + +struct cmdq_wqe { + union { + struct cmdq_wqe_scmd wqe_scmd; + struct cmdq_wqe_lcmd wqe_lcmd; + }; +}; + +static_assert(sizeof(struct cmdq_wqe) == 64); + +enum hinic3_cmdq_type { + HINIC3_CMDQ_SYNC = 0, + HINIC3_MAX_CMDQ_TYPES = 4 +}; + +enum hinic3_cmdq_status { + HINIC3_CMDQ_ENABLE = BIT(0), +}; + +enum hinic3_cmdq_cmd_type { + HINIC3_CMD_TYPE_NONE, + HINIC3_CMD_TYPE_DIRECT_RESP, + HINIC3_CMD_TYPE_FAKE_TIMEOUT, + HINIC3_CMD_TYPE_TIMEOUT, + HINIC3_CMD_TYPE_FORCE_STOP, +}; + +struct hinic3_cmd_buf { + void *buf; + dma_addr_t dma_addr; + __le16 size; + refcount_t ref_cnt; +}; + +struct hinic3_cmdq_cmd_info { + enum hinic3_cmdq_cmd_type cmd_type; + struct completion *done; + int *errcode; + /* completion code */ + int *cmpt_code; + __le64 *direct_resp; + u64 cmdq_msg_id; + struct hinic3_cmd_buf *buf_in; +}; + +struct hinic3_cmdq { + struct hinic3_wq wq; + enum hinic3_cmdq_type cmdq_type; + u8 wrapped; + /* synchronize command submission with completions via event queue */ + spinlock_t cmdq_lock; + struct hinic3_cmdq_cmd_info *cmd_infos; + struct hinic3_hwdev *hwdev; +}; + +struct hinic3_cmdqs { + struct hinic3_hwdev *hwdev; + struct hinic3_cmdq cmdq[HINIC3_MAX_CMDQ_TYPES]; + struct dma_pool *cmd_buf_pool; + /* doorbell area */ + u8 __iomem *cmdqs_db_base; + + /* When command queue uses multiple memory pages (1-level CLA), this + * block will hold aggregated indirection table for all command queues + * of cmdqs. Not used for small cmdq (0-level CLA). 
+ */ + dma_addr_t wq_block_paddr; + void *wq_block_vaddr; + + u32 status; + u32 disable_flag; + u8 cmdq_num; +}; + +int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev); +void hinic3_cmdqs_free(struct hinic3_hwdev *hwdev); + +struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(struct hinic3_hwdev *hwdev); +void hinic3_free_cmd_buf(struct hinic3_hwdev *hwdev, + struct hinic3_cmd_buf *cmd_buf); +void hinic3_cmdq_ceq_handler(struct hinic3_hwdev *hwdev, __le32 ceqe_data); + +int hinic3_cmdq_direct_resp(struct hinic3_hwdev *hwdev, u8 mod, u8 cmd, + struct hinic3_cmd_buf *buf_in, __le64 *out_param); + +void hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev *hwdev); +int hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev *hwdev); +bool hinic3_cmdq_idle(struct hinic3_cmdq *cmdq); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c index 0aa42068728c..fe4778d152cf 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_common.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c @@ -3,6 +3,7 @@ #include <linux/delay.h> #include <linux/dma-mapping.h> +#include <linux/iopoll.h> #include "hinic3_common.h" @@ -51,3 +52,25 @@ void hinic3_dma_free_coherent_align(struct device *dev, dma_free_coherent(dev, mem_align->real_size, mem_align->ori_vaddr, mem_align->ori_paddr); } + +int hinic3_wait_for_timeout(void *priv_data, wait_cpl_handler handler, + u32 wait_total_ms, u32 wait_once_us) +{ + enum hinic3_wait_return ret; + int err; + + err = read_poll_timeout(handler, ret, ret == HINIC3_WAIT_PROCESS_CPL, + wait_once_us, wait_total_ms * USEC_PER_MSEC, + false, priv_data); + + return err; +} + +/* Data provided to/by cmdq is arranged in structs with little endian fields but + * every dword (32bits) should be swapped since HW swaps it again when it + * copies it from/to host memory. + */ +void hinic3_cmdq_buf_swab32(void *data, int len) +{ + swab32_array(data, len / sizeof(u32)); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h index bb795dace04c..a8fabfae90fb 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h @@ -18,10 +18,37 @@ struct hinic3_dma_addr_align { dma_addr_t align_paddr; }; +enum hinic3_wait_return { + HINIC3_WAIT_PROCESS_CPL = 0, + HINIC3_WAIT_PROCESS_WAITING = 1, +}; + +struct hinic3_sge { + __le32 hi_addr; + __le32 lo_addr; + __le32 len; + __le32 rsvd; +}; + +static inline void hinic3_set_sge(struct hinic3_sge *sge, dma_addr_t addr, + __le32 len) +{ + sge->hi_addr = cpu_to_le32(upper_32_bits(addr)); + sge->lo_addr = cpu_to_le32(lower_32_bits(addr)); + sge->len = len; + sge->rsvd = 0; +} + int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align, gfp_t flag, struct hinic3_dma_addr_align *mem_align); void hinic3_dma_free_coherent_align(struct device *dev, struct hinic3_dma_addr_align *mem_align); +typedef enum hinic3_wait_return (*wait_cpl_handler)(void *priv_data); +int hinic3_wait_for_timeout(void *priv_data, wait_cpl_handler handler, + u32 wait_total_ms, u32 wait_once_us); + +void hinic3_cmdq_buf_swab32(void *data, int len); + #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h b/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h new file mode 100644 index 000000000000..e7417e8efa99 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. 
All rights reserved. */ + +#ifndef _HINIC3_CSR_H_ +#define _HINIC3_CSR_H_ + +#define HINIC3_CFG_REGS_FLAG 0x40000000 +#define HINIC3_REGS_FLAG_MASK 0x3FFFFFFF + +#define HINIC3_VF_CFG_REG_OFFSET 0x2000 + +/* HW interface registers */ +#define HINIC3_CSR_FUNC_ATTR0_ADDR (HINIC3_CFG_REGS_FLAG + 0x0) +#define HINIC3_CSR_FUNC_ATTR1_ADDR (HINIC3_CFG_REGS_FLAG + 0x4) +#define HINIC3_CSR_FUNC_ATTR2_ADDR (HINIC3_CFG_REGS_FLAG + 0x8) +#define HINIC3_CSR_FUNC_ATTR3_ADDR (HINIC3_CFG_REGS_FLAG + 0xC) +#define HINIC3_CSR_FUNC_ATTR4_ADDR (HINIC3_CFG_REGS_FLAG + 0x10) +#define HINIC3_CSR_FUNC_ATTR5_ADDR (HINIC3_CFG_REGS_FLAG + 0x14) +#define HINIC3_CSR_FUNC_ATTR6_ADDR (HINIC3_CFG_REGS_FLAG + 0x18) + +#define HINIC3_FUNC_CSR_MAILBOX_DATA_OFF 0x80 +#define HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF (HINIC3_CFG_REGS_FLAG + 0x0100) +#define HINIC3_FUNC_CSR_MAILBOX_INT_OFF (HINIC3_CFG_REGS_FLAG + 0x0104) +#define HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF (HINIC3_CFG_REGS_FLAG + 0x0108) +#define HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF (HINIC3_CFG_REGS_FLAG + 0x010C) + +#define HINIC3_CSR_DMA_ATTR_TBL_ADDR (HINIC3_CFG_REGS_FLAG + 0x380) +#define HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x390) + +/* MSI-X registers */ +#define HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR (HINIC3_CFG_REGS_FLAG + 0x58) + +#define HINIC3_MSI_CLR_INDIR_RESEND_TIMER_CLR_MASK BIT(0) +#define HINIC3_MSI_CLR_INDIR_INT_MSK_SET_MASK BIT(1) +#define HINIC3_MSI_CLR_INDIR_INT_MSK_CLR_MASK BIT(2) +#define HINIC3_MSI_CLR_INDIR_AUTO_MSK_SET_MASK BIT(3) +#define HINIC3_MSI_CLR_INDIR_AUTO_MSK_CLR_MASK BIT(4) +#define HINIC3_MSI_CLR_INDIR_SIMPLE_INDIR_IDX_MASK GENMASK(31, 22) +#define HINIC3_MSI_CLR_INDIR_SET(val, member) \ + FIELD_PREP(HINIC3_MSI_CLR_INDIR_##member##_MASK, val) + +/* EQ registers */ +#define HINIC3_AEQ_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x210) +#define HINIC3_CEQ_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x290) + +#define HINIC3_EQ_INDIR_IDX_ADDR(type) \ + ((type == HINIC3_AEQ) ? HINIC3_AEQ_INDIR_IDX_ADDR : \ + HINIC3_CEQ_INDIR_IDX_ADDR) + +#define HINIC3_AEQ_MTT_OFF_BASE_ADDR (HINIC3_CFG_REGS_FLAG + 0x240) +#define HINIC3_CEQ_MTT_OFF_BASE_ADDR (HINIC3_CFG_REGS_FLAG + 0x2C0) + +#define HINIC3_CSR_EQ_PAGE_OFF_STRIDE 8 + +#define HINIC3_AEQ_HI_PHYS_ADDR_REG(pg_num) \ + (HINIC3_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * \ + HINIC3_CSR_EQ_PAGE_OFF_STRIDE) + +#define HINIC3_AEQ_LO_PHYS_ADDR_REG(pg_num) \ + (HINIC3_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * \ + HINIC3_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HINIC3_CEQ_HI_PHYS_ADDR_REG(pg_num) \ + (HINIC3_CEQ_MTT_OFF_BASE_ADDR + (pg_num) * \ + HINIC3_CSR_EQ_PAGE_OFF_STRIDE) + +#define HINIC3_CEQ_LO_PHYS_ADDR_REG(pg_num) \ + (HINIC3_CEQ_MTT_OFF_BASE_ADDR + (pg_num) * \ + HINIC3_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HINIC3_CSR_AEQ_CTRL_0_ADDR (HINIC3_CFG_REGS_FLAG + 0x200) +#define HINIC3_CSR_AEQ_CTRL_1_ADDR (HINIC3_CFG_REGS_FLAG + 0x204) +#define HINIC3_CSR_AEQ_PROD_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x20C) +#define HINIC3_CSR_AEQ_CI_SIMPLE_INDIR_ADDR (HINIC3_CFG_REGS_FLAG + 0x50) + +#define HINIC3_CSR_CEQ_PROD_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x28c) +#define HINIC3_CSR_CEQ_CI_SIMPLE_INDIR_ADDR (HINIC3_CFG_REGS_FLAG + 0x54) + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c new file mode 100644 index 000000000000..01686472985b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c @@ -0,0 +1,776 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
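The hinic3_cmdq_buf_swab32() helper added to hinic3_common.c above encodes a quirk worth spelling out: cmdq data buffers hold structs with little-endian fields, but the hardware byte-swaps every 32-bit dword as it copies the buffer to or from host memory, so the driver swaps each dword up front to cancel the hardware swap. A standalone, userspace-compilable sketch of that transform (demo code only, with swab32 open-coded; none of it is part of this patch):

#include <stdint.h>
#include <stdio.h>

/* Open-coded 32-bit byte swap, standing in for the kernel's swab32(). */
static uint32_t demo_swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000FF00u) |
	       ((x << 8) & 0x00FF0000u) | (x << 24);
}

/* Same shape as hinic3_cmdq_buf_swab32(): swap every dword in place. */
static void demo_cmdq_buf_swab32(uint32_t *data, int len_bytes)
{
	for (int i = 0; i < len_bytes / 4; i++)
		data[i] = demo_swab32(data[i]);
}

int main(void)
{
	uint32_t buf[2] = { 0x11223344u, 0xAABBCCDDu };

	demo_cmdq_buf_swab32(buf, sizeof(buf));
	/* prints 0x44332211 0xDDCCBBAA */
	printf("0x%08X 0x%08X\n", buf[0], buf[1]);
	return 0;
}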
+ +#include <linux/delay.h> + +#include "hinic3_csr.h" +#include "hinic3_eqs.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" + +#define AEQ_CTRL_0_INTR_IDX_MASK GENMASK(9, 0) +#define AEQ_CTRL_0_DMA_ATTR_MASK GENMASK(17, 12) +#define AEQ_CTRL_0_PCI_INTF_IDX_MASK GENMASK(22, 20) +#define AEQ_CTRL_0_INTR_MODE_MASK BIT(31) +#define AEQ_CTRL_0_SET(val, member) \ + FIELD_PREP(AEQ_CTRL_0_##member##_MASK, val) + +#define AEQ_CTRL_1_LEN_MASK GENMASK(20, 0) +#define AEQ_CTRL_1_ELEM_SIZE_MASK GENMASK(25, 24) +#define AEQ_CTRL_1_PAGE_SIZE_MASK GENMASK(31, 28) +#define AEQ_CTRL_1_SET(val, member) \ + FIELD_PREP(AEQ_CTRL_1_##member##_MASK, val) + +#define CEQ_CTRL_0_INTR_IDX_MASK GENMASK(9, 0) +#define CEQ_CTRL_0_DMA_ATTR_MASK GENMASK(17, 12) +#define CEQ_CTRL_0_LIMIT_KICK_MASK GENMASK(23, 20) +#define CEQ_CTRL_0_PCI_INTF_IDX_MASK GENMASK(25, 24) +#define CEQ_CTRL_0_PAGE_SIZE_MASK GENMASK(30, 27) +#define CEQ_CTRL_0_INTR_MODE_MASK BIT(31) +#define CEQ_CTRL_0_SET(val, member) \ + FIELD_PREP(CEQ_CTRL_0_##member##_MASK, val) + +#define CEQ_CTRL_1_LEN_MASK GENMASK(19, 0) +#define CEQ_CTRL_1_SET(val, member) \ + FIELD_PREP(CEQ_CTRL_1_##member##_MASK, val) + +#define CEQE_TYPE_MASK GENMASK(25, 23) +#define CEQE_TYPE(type) \ + FIELD_GET(CEQE_TYPE_MASK, le32_to_cpu(type)) + +#define CEQE_DATA_MASK GENMASK(25, 0) +#define CEQE_DATA(data) ((data) & cpu_to_le32(CEQE_DATA_MASK)) + +#define EQ_ELEM_DESC_TYPE_MASK GENMASK(6, 0) +#define EQ_ELEM_DESC_SRC_MASK BIT(7) +#define EQ_ELEM_DESC_SIZE_MASK GENMASK(15, 8) +#define EQ_ELEM_DESC_WRAPPED_MASK BIT(31) +#define EQ_ELEM_DESC_GET(val, member) \ + FIELD_GET(EQ_ELEM_DESC_##member##_MASK, le32_to_cpu(val)) + +#define EQ_CI_SIMPLE_INDIR_CI_MASK GENMASK(20, 0) +#define EQ_CI_SIMPLE_INDIR_ARMED_MASK BIT(21) +#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_MASK GENMASK(31, 30) +#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_MASK GENMASK(31, 24) +#define EQ_CI_SIMPLE_INDIR_SET(val, member) \ + FIELD_PREP(EQ_CI_SIMPLE_INDIR_##member##_MASK, val) + +#define EQ_CI_SIMPLE_INDIR_REG_ADDR(eq) \ + (((eq)->type == HINIC3_AEQ) ? \ + HINIC3_CSR_AEQ_CI_SIMPLE_INDIR_ADDR : \ + HINIC3_CSR_CEQ_CI_SIMPLE_INDIR_ADDR) + +#define EQ_PROD_IDX_REG_ADDR(eq) \ + (((eq)->type == HINIC3_AEQ) ? \ + HINIC3_CSR_AEQ_PROD_IDX_ADDR : HINIC3_CSR_CEQ_PROD_IDX_ADDR) + +#define EQ_HI_PHYS_ADDR_REG(type, pg_num) \ + (((type) == HINIC3_AEQ) ? \ + HINIC3_AEQ_HI_PHYS_ADDR_REG(pg_num) : \ + HINIC3_CEQ_HI_PHYS_ADDR_REG(pg_num)) + +#define EQ_LO_PHYS_ADDR_REG(type, pg_num) \ + (((type) == HINIC3_AEQ) ? \ + HINIC3_AEQ_LO_PHYS_ADDR_REG(pg_num) : \ + HINIC3_CEQ_LO_PHYS_ADDR_REG(pg_num)) + +#define EQ_MSIX_RESEND_TIMER_CLEAR 1 + +#define HINIC3_EQ_MAX_PAGES(eq) \ + ((eq)->type == HINIC3_AEQ ? 
\ + HINIC3_AEQ_MAX_PAGES : HINIC3_CEQ_MAX_PAGES) + +#define HINIC3_TASK_PROCESS_EQE_LIMIT 1024 +#define HINIC3_EQ_UPDATE_CI_STEP 64 +#define HINIC3_EQS_WQ_NAME "hinic3_eqs" + +#define HINIC3_EQ_VALID_SHIFT 31 +#define HINIC3_EQ_WRAPPED(eq) \ + ((eq)->wrapped << HINIC3_EQ_VALID_SHIFT) + +#define HINIC3_EQ_WRAPPED_SHIFT 20 +#define HINIC3_EQ_CONS_IDX(eq) \ + ((eq)->cons_idx | ((eq)->wrapped << HINIC3_EQ_WRAPPED_SHIFT)) + +static const struct hinic3_aeq_elem *get_curr_aeq_elem(const struct hinic3_eq *eq) +{ + return get_q_element(&eq->qpages, eq->cons_idx, NULL); +} + +static const __be32 *get_curr_ceq_elem(const struct hinic3_eq *eq) +{ + return get_q_element(&eq->qpages, eq->cons_idx, NULL); +} + +int hinic3_aeq_register_cb(struct hinic3_hwdev *hwdev, + enum hinic3_aeq_type event, + hinic3_aeq_event_cb hwe_cb) +{ + struct hinic3_aeqs *aeqs; + + aeqs = hwdev->aeqs; + aeqs->aeq_cb[event] = hwe_cb; + spin_lock_init(&aeqs->aeq_lock); + + return 0; +} + +void hinic3_aeq_unregister_cb(struct hinic3_hwdev *hwdev, + enum hinic3_aeq_type event) +{ + struct hinic3_aeqs *aeqs; + + aeqs = hwdev->aeqs; + + spin_lock_bh(&aeqs->aeq_lock); + aeqs->aeq_cb[event] = NULL; + spin_unlock_bh(&aeqs->aeq_lock); +} + +int hinic3_ceq_register_cb(struct hinic3_hwdev *hwdev, + enum hinic3_ceq_event event, + hinic3_ceq_event_cb callback) +{ + struct hinic3_ceqs *ceqs; + + ceqs = hwdev->ceqs; + ceqs->ceq_cb[event] = callback; + spin_lock_init(&ceqs->ceq_lock); + + return 0; +} + +void hinic3_ceq_unregister_cb(struct hinic3_hwdev *hwdev, + enum hinic3_ceq_event event) +{ + struct hinic3_ceqs *ceqs; + + ceqs = hwdev->ceqs; + + spin_lock_bh(&ceqs->ceq_lock); + ceqs->ceq_cb[event] = NULL; + spin_unlock_bh(&ceqs->ceq_lock); +} + +/* Set consumer index in the hw. */ +static void set_eq_cons_idx(struct hinic3_eq *eq, u32 arm_state) +{ + u32 addr = EQ_CI_SIMPLE_INDIR_REG_ADDR(eq); + u32 eq_wrap_ci, val; + + eq_wrap_ci = HINIC3_EQ_CONS_IDX(eq); + val = EQ_CI_SIMPLE_INDIR_SET(arm_state, ARMED); + if (eq->type == HINIC3_AEQ) { + val = val | + EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) | + EQ_CI_SIMPLE_INDIR_SET(eq->q_id, AEQ_IDX); + } else { + val = val | + EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) | + EQ_CI_SIMPLE_INDIR_SET(eq->q_id, CEQ_IDX); + } + + hinic3_hwif_write_reg(eq->hwdev->hwif, addr, val); +} + +static struct hinic3_ceqs *ceq_to_ceqs(const struct hinic3_eq *eq) +{ + return container_of(eq, struct hinic3_ceqs, ceq[eq->q_id]); +} + +static void ceq_event_handler(struct hinic3_ceqs *ceqs, __le32 ceqe) +{ + enum hinic3_ceq_event event = CEQE_TYPE(ceqe); + struct hinic3_hwdev *hwdev = ceqs->hwdev; + __le32 ceqe_data = CEQE_DATA(ceqe); + + if (event >= HINIC3_MAX_CEQ_EVENTS) { + dev_warn(hwdev->dev, "Ceq unknown event:%d, ceqe data: 0x%x\n", + event, ceqe_data); + return; + } + + spin_lock_bh(&ceqs->ceq_lock); + if (ceqs->ceq_cb[event]) + ceqs->ceq_cb[event](hwdev, ceqe_data); + + spin_unlock_bh(&ceqs->ceq_lock); +} + +static struct hinic3_aeqs *aeq_to_aeqs(const struct hinic3_eq *eq) +{ + return container_of(eq, struct hinic3_aeqs, aeq[eq->q_id]); +} + +static void aeq_event_handler(struct hinic3_aeqs *aeqs, __le32 aeqe, + const struct hinic3_aeq_elem *aeqe_pos) +{ + struct hinic3_hwdev *hwdev = aeqs->hwdev; + u8 data[HINIC3_AEQE_DATA_SIZE], size; + enum hinic3_aeq_type event; + hinic3_aeq_event_cb hwe_cb; + + if (EQ_ELEM_DESC_GET(aeqe, SRC)) + return; + + event = EQ_ELEM_DESC_GET(aeqe, TYPE); + if (event >= HINIC3_MAX_AEQ_EVENTS) { + dev_warn(hwdev->dev, "Aeq unknown event:%d\n", event); + return; + } + + memcpy(data, 
aeqe_pos->aeqe_data, HINIC3_AEQE_DATA_SIZE); + swab32_array((u32 *)data, HINIC3_AEQE_DATA_SIZE / sizeof(u32)); + size = EQ_ELEM_DESC_GET(aeqe, SIZE); + + spin_lock_bh(&aeqs->aeq_lock); + hwe_cb = aeqs->aeq_cb[event]; + if (hwe_cb) + hwe_cb(aeqs->hwdev, data, size); + spin_unlock_bh(&aeqs->aeq_lock); +} + +static int aeq_irq_handler(struct hinic3_eq *eq) +{ + const struct hinic3_aeq_elem *aeqe_pos; + struct hinic3_aeqs *aeqs; + u32 i, eqe_cnt = 0; + __le32 aeqe; + + aeqs = aeq_to_aeqs(eq); + for (i = 0; i < HINIC3_TASK_PROCESS_EQE_LIMIT; i++) { + aeqe_pos = get_curr_aeq_elem(eq); + aeqe = (__force __le32)swab32((__force __u32)aeqe_pos->desc); + /* HW updates wrapped bit, when it adds eq element event */ + if (EQ_ELEM_DESC_GET(aeqe, WRAPPED) == eq->wrapped) + return 0; + + /* Prevent speculative reads from element */ + dma_rmb(); + aeq_event_handler(aeqs, aeqe, aeqe_pos); + eq->cons_idx++; + if (eq->cons_idx == eq->eq_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + + if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED); + } + } + + return -EAGAIN; +} + +static int ceq_irq_handler(struct hinic3_eq *eq) +{ + struct hinic3_ceqs *ceqs; + u32 eqe_cnt = 0; + __be32 ceqe_raw; + __le32 ceqe; + u32 i; + + ceqs = ceq_to_ceqs(eq); + for (i = 0; i < HINIC3_TASK_PROCESS_EQE_LIMIT; i++) { + ceqe_raw = *get_curr_ceq_elem(eq); + ceqe = (__force __le32)swab32((__force __u32)ceqe_raw); + + /* HW updates wrapped bit, when it adds eq element event */ + if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped) + return 0; + + ceq_event_handler(ceqs, ceqe); + eq->cons_idx++; + if (eq->cons_idx == eq->eq_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + + if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED); + } + } + + return -EAGAIN; +} + +static void reschedule_aeq_handler(struct hinic3_eq *eq) +{ + struct hinic3_aeqs *aeqs = aeq_to_aeqs(eq); + + queue_work(aeqs->workq, &eq->aeq_work); +} + +static int eq_irq_handler(struct hinic3_eq *eq) +{ + int err; + + if (eq->type == HINIC3_AEQ) + err = aeq_irq_handler(eq); + else + err = ceq_irq_handler(eq); + + set_eq_cons_idx(eq, err ? 
HINIC3_EQ_NOT_ARMED : + HINIC3_EQ_ARMED); + + return err; +} + +static void aeq_irq_work(struct work_struct *work) +{ + struct hinic3_eq *eq = container_of(work, struct hinic3_eq, aeq_work); + int err; + + err = eq_irq_handler(eq); + if (err) + reschedule_aeq_handler(eq); +} + +static irqreturn_t aeq_interrupt(int irq, void *data) +{ + struct workqueue_struct *workq; + struct hinic3_eq *aeq = data; + struct hinic3_hwdev *hwdev; + struct hinic3_aeqs *aeqs; + + aeqs = aeq_to_aeqs(aeq); + hwdev = aeq->hwdev; + + /* clear resend timer cnt register */ + workq = aeqs->workq; + hinic3_msix_intr_clear_resend_bit(hwdev, aeq->msix_entry_idx, + EQ_MSIX_RESEND_TIMER_CLEAR); + queue_work(workq, &aeq->aeq_work); + + return IRQ_HANDLED; +} + +static irqreturn_t ceq_interrupt(int irq, void *data) +{ + struct hinic3_eq *ceq = data; + int err; + + /* clear resend timer counters */ + hinic3_msix_intr_clear_resend_bit(ceq->hwdev, ceq->msix_entry_idx, + EQ_MSIX_RESEND_TIMER_CLEAR); + err = eq_irq_handler(ceq); + if (err) + return IRQ_NONE; + + return IRQ_HANDLED; +} + +static int hinic3_set_ceq_ctrl_reg(struct hinic3_hwdev *hwdev, u16 q_id, + u32 ctrl0, u32 ctrl1) +{ + struct comm_cmd_set_ceq_ctrl_reg ceq_ctrl = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + ceq_ctrl.func_id = hinic3_global_func_id(hwdev); + ceq_ctrl.q_id = q_id; + ceq_ctrl.ctrl0 = ctrl0; + ceq_ctrl.ctrl1 = ctrl1; + + mgmt_msg_params_init_default(&msg_params, &ceq_ctrl, sizeof(ceq_ctrl)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, + COMM_CMD_SET_CEQ_CTRL_REG, &msg_params); + if (err || ceq_ctrl.head.status) { + dev_err(hwdev->dev, "Failed to set ceq %u ctrl reg, err: %d status: 0x%x\n", + q_id, err, ceq_ctrl.head.status); + return -EFAULT; + } + + return 0; +} + +static int set_eq_ctrls(struct hinic3_eq *eq) +{ + struct hinic3_hwif *hwif = eq->hwdev->hwif; + struct hinic3_queue_pages *qpages; + u8 pci_intf_idx, elem_size; + u32 mask, ctrl0, ctrl1; + u32 page_size_val; + int err; + + qpages = &eq->qpages; + page_size_val = ilog2(qpages->page_size / HINIC3_MIN_PAGE_SIZE); + pci_intf_idx = hwif->attr.pci_intf_idx; + + if (eq->type == HINIC3_AEQ) { + /* set ctrl0 using read-modify-write */ + mask = AEQ_CTRL_0_INTR_IDX_MASK | + AEQ_CTRL_0_DMA_ATTR_MASK | + AEQ_CTRL_0_PCI_INTF_IDX_MASK | + AEQ_CTRL_0_INTR_MODE_MASK; + ctrl0 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_AEQ_CTRL_0_ADDR); + ctrl0 = (ctrl0 & ~mask) | + AEQ_CTRL_0_SET(eq->msix_entry_idx, INTR_IDX) | + AEQ_CTRL_0_SET(0, DMA_ATTR) | + AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + AEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE); + hinic3_hwif_write_reg(hwif, HINIC3_CSR_AEQ_CTRL_0_ADDR, ctrl0); + + /* HW expects log2(number of 32 byte units). 
*/ + elem_size = qpages->elem_size_shift - 5; + ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) | + AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | + AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + hinic3_hwif_write_reg(hwif, HINIC3_CSR_AEQ_CTRL_1_ADDR, ctrl1); + } else { + ctrl0 = CEQ_CTRL_0_SET(eq->msix_entry_idx, INTR_IDX) | + CEQ_CTRL_0_SET(0, DMA_ATTR) | + CEQ_CTRL_0_SET(0, LIMIT_KICK) | + CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + CEQ_CTRL_0_SET(page_size_val, PAGE_SIZE) | + CEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE); + + ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN); + + /* set ceq ctrl reg through mgmt cpu */ + err = hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, + ctrl1); + if (err) + return err; + } + + return 0; +} + +static void ceq_elements_init(struct hinic3_eq *eq, u32 init_val) +{ + __be32 *ceqe; + u32 i; + + for (i = 0; i < eq->eq_len; i++) { + ceqe = get_q_element(&eq->qpages, i, NULL); + *ceqe = cpu_to_be32(init_val); + } + + wmb(); /* Clear ceq elements bit */ +} + +static void aeq_elements_init(struct hinic3_eq *eq, u32 init_val) +{ + struct hinic3_aeq_elem *aeqe; + u32 i; + + for (i = 0; i < eq->eq_len; i++) { + aeqe = get_q_element(&eq->qpages, i, NULL); + aeqe->desc = cpu_to_be32(init_val); + } + + wmb(); /* Clear aeq elements bit */ +} + +static void eq_elements_init(struct hinic3_eq *eq, u32 init_val) +{ + if (eq->type == HINIC3_AEQ) + aeq_elements_init(eq, init_val); + else + ceq_elements_init(eq, init_val); +} + +static int alloc_eq_pages(struct hinic3_eq *eq) +{ + struct hinic3_hwif *hwif = eq->hwdev->hwif; + struct hinic3_queue_pages *qpages; + dma_addr_t page_paddr; + u32 reg, init_val; + u16 pg_idx; + int err; + + qpages = &eq->qpages; + err = hinic3_queue_pages_alloc(eq->hwdev, qpages, HINIC3_MIN_PAGE_SIZE); + if (err) + return err; + + for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) { + page_paddr = qpages->pages[pg_idx].align_paddr; + reg = EQ_HI_PHYS_ADDR_REG(eq->type, pg_idx); + hinic3_hwif_write_reg(hwif, reg, upper_32_bits(page_paddr)); + reg = EQ_LO_PHYS_ADDR_REG(eq->type, pg_idx); + hinic3_hwif_write_reg(hwif, reg, lower_32_bits(page_paddr)); + } + + init_val = HINIC3_EQ_WRAPPED(eq); + eq_elements_init(eq, init_val); + + return 0; +} + +static void eq_calc_page_size_and_num(struct hinic3_eq *eq, u32 elem_size) +{ + u32 max_pages, min_page_size, page_size, total_size; + + /* No need for complicated arithmetic. All values must be power of 2. + * Multiplications give power of 2 and divisions give power of 2 without + * remainder. 
+ */ + max_pages = HINIC3_EQ_MAX_PAGES(eq); + min_page_size = HINIC3_MIN_PAGE_SIZE; + total_size = eq->eq_len * elem_size; + + if (total_size <= max_pages * min_page_size) + page_size = min_page_size; + else + page_size = total_size / max_pages; + + hinic3_queue_pages_init(&eq->qpages, eq->eq_len, page_size, elem_size); +} + +static int request_eq_irq(struct hinic3_eq *eq) +{ + int err; + + if (eq->type == HINIC3_AEQ) { + INIT_WORK(&eq->aeq_work, aeq_irq_work); + snprintf(eq->irq_name, sizeof(eq->irq_name), + "hinic3_aeq%u@pci:%s", eq->q_id, + pci_name(eq->hwdev->pdev)); + err = request_irq(eq->irq_id, aeq_interrupt, 0, + eq->irq_name, eq); + } else { + snprintf(eq->irq_name, sizeof(eq->irq_name), + "hinic3_ceq%u@pci:%s", eq->q_id, + pci_name(eq->hwdev->pdev)); + err = request_threaded_irq(eq->irq_id, NULL, ceq_interrupt, + IRQF_ONESHOT, eq->irq_name, eq); + } + + return err; +} + +static void reset_eq(struct hinic3_eq *eq) +{ + /* clear eq_len to force eqe drop in hardware */ + if (eq->type == HINIC3_AEQ) + hinic3_hwif_write_reg(eq->hwdev->hwif, + HINIC3_CSR_AEQ_CTRL_1_ADDR, 0); + else + hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); + + hinic3_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0); +} + +static int init_eq(struct hinic3_eq *eq, struct hinic3_hwdev *hwdev, u16 q_id, + u32 q_len, enum hinic3_eq_type type, + struct msix_entry *msix_entry) +{ + u32 elem_size; + int err; + + eq->hwdev = hwdev; + eq->q_id = q_id; + eq->type = type; + eq->eq_len = q_len; + + /* Indirect access should set q_id first */ + hinic3_hwif_write_reg(hwdev->hwif, HINIC3_EQ_INDIR_IDX_ADDR(eq->type), + eq->q_id); + + reset_eq(eq); + + eq->cons_idx = 0; + eq->wrapped = 0; + + elem_size = (type == HINIC3_AEQ) ? HINIC3_AEQE_SIZE : HINIC3_CEQE_SIZE; + eq_calc_page_size_and_num(eq, elem_size); + + err = alloc_eq_pages(eq); + if (err) { + dev_err(hwdev->dev, "Failed to allocate pages for eq\n"); + return err; + } + + eq->msix_entry_idx = msix_entry->entry; + eq->irq_id = msix_entry->vector; + + err = set_eq_ctrls(eq); + if (err) { + dev_err(hwdev->dev, "Failed to set ctrls for eq\n"); + goto err_free_queue_pages; + } + + set_eq_cons_idx(eq, HINIC3_EQ_ARMED); + + err = request_eq_irq(eq); + if (err) { + dev_err(hwdev->dev, + "Failed to request irq for the eq, err: %d\n", err); + goto err_free_queue_pages; + } + + hinic3_set_msix_state(hwdev, eq->msix_entry_idx, HINIC3_MSIX_DISABLE); + + return 0; + +err_free_queue_pages: + hinic3_queue_pages_free(hwdev, &eq->qpages); + + return err; +} + +static void remove_eq(struct hinic3_eq *eq) +{ + hinic3_set_msix_state(eq->hwdev, eq->msix_entry_idx, + HINIC3_MSIX_DISABLE); + free_irq(eq->irq_id, eq); + /* Indirect access should set q_id first */ + hinic3_hwif_write_reg(eq->hwdev->hwif, + HINIC3_EQ_INDIR_IDX_ADDR(eq->type), + eq->q_id); + + if (eq->type == HINIC3_AEQ) { + disable_work_sync(&eq->aeq_work); + /* clear eq_len to avoid hw access host memory */ + hinic3_hwif_write_reg(eq->hwdev->hwif, + HINIC3_CSR_AEQ_CTRL_1_ADDR, 0); + } else { + hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); + } + + /* update consumer index to avoid invalid interrupt */ + eq->cons_idx = hinic3_hwif_read_reg(eq->hwdev->hwif, + EQ_PROD_IDX_REG_ADDR(eq)); + set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED); + hinic3_queue_pages_free(eq->hwdev, &eq->qpages); +} + +int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs, + struct msix_entry *msix_entries) +{ + struct hinic3_aeqs *aeqs; + u16 q_id; + int err; + + aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL); + if (!aeqs) + return -ENOMEM; 
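Since eq_calc_page_size_and_num() above leans on every operand being a power of two, the default queue geometry can be checked by hand. Assuming HINIC3_MIN_PAGE_SIZE is 4096 (it is defined in a header outside this diff), a 0x10000-entry AEQ of 64-byte elements needs 4 MiB, which exceeds 4 pages x 4 KiB, so each of the 4 pages becomes 1 MiB; the CEQ case lands on 8 pages of 32 KiB. A small userspace sketch reproducing only the arithmetic (demo code, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Reproduces the page-size selection in eq_calc_page_size_and_num()
 * for the defaults in this diff; assumes a 4096-byte minimum page.
 */
static void eq_pages(const char *name, uint32_t eq_len, uint32_t elem_size,
		     uint32_t max_pages)
{
	const uint32_t min_page_size = 4096;
	uint32_t total = eq_len * elem_size;
	uint32_t page_size = (total <= max_pages * min_page_size) ?
			     min_page_size : total / max_pages;

	printf("%s: total=%u B, page_size=%u B, pages=%u\n",
	       name, total, page_size, total / page_size);
}

int main(void)
{
	eq_pages("AEQ", 0x10000, 64, 4); /* 4 MiB -> 4 pages of 1 MiB */
	eq_pages("CEQ", 0x10000, 4, 8);  /* 256 KiB -> 8 pages of 32 KiB */
	return 0;
}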
+ + hwdev->aeqs = aeqs; + aeqs->hwdev = hwdev; + aeqs->num_aeqs = num_aeqs; + aeqs->workq = alloc_workqueue(HINIC3_EQS_WQ_NAME, WQ_MEM_RECLAIM, + HINIC3_MAX_AEQS); + if (!aeqs->workq) { + dev_err(hwdev->dev, "Failed to initialize aeq workqueue\n"); + err = -ENOMEM; + goto err_free_aeqs; + } + + for (q_id = 0; q_id < num_aeqs; q_id++) { + err = init_eq(&aeqs->aeq[q_id], hwdev, q_id, + HINIC3_DEFAULT_AEQ_LEN, HINIC3_AEQ, + &msix_entries[q_id]); + if (err) { + dev_err(hwdev->dev, "Failed to init aeq %u\n", + q_id); + goto err_remove_eqs; + } + } + for (q_id = 0; q_id < num_aeqs; q_id++) + hinic3_set_msix_state(hwdev, aeqs->aeq[q_id].msix_entry_idx, + HINIC3_MSIX_ENABLE); + + return 0; + +err_remove_eqs: + while (q_id > 0) { + q_id--; + remove_eq(&aeqs->aeq[q_id]); + } + + destroy_workqueue(aeqs->workq); + +err_free_aeqs: + kfree(aeqs); + + return err; +} + +void hinic3_aeqs_free(struct hinic3_hwdev *hwdev) +{ + struct hinic3_aeqs *aeqs = hwdev->aeqs; + enum hinic3_aeq_type aeq_event; + struct hinic3_eq *eq; + u16 q_id; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) { + eq = aeqs->aeq + q_id; + remove_eq(eq); + hinic3_free_irq(hwdev, eq->irq_id); + } + + for (aeq_event = 0; aeq_event < HINIC3_MAX_AEQ_EVENTS; aeq_event++) + hinic3_aeq_unregister_cb(hwdev, aeq_event); + + destroy_workqueue(aeqs->workq); + + kfree(aeqs); +} + +int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs, + struct msix_entry *msix_entries) +{ + struct hinic3_ceqs *ceqs; + u16 q_id; + int err; + + ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL); + if (!ceqs) + return -ENOMEM; + + hwdev->ceqs = ceqs; + ceqs->hwdev = hwdev; + ceqs->num_ceqs = num_ceqs; + + for (q_id = 0; q_id < num_ceqs; q_id++) { + err = init_eq(&ceqs->ceq[q_id], hwdev, q_id, + HINIC3_DEFAULT_CEQ_LEN, HINIC3_CEQ, + &msix_entries[q_id]); + if (err) { + dev_err(hwdev->dev, "Failed to init ceq %u\n", + q_id); + goto err_free_ceqs; + } + } + for (q_id = 0; q_id < num_ceqs; q_id++) + hinic3_set_msix_state(hwdev, ceqs->ceq[q_id].msix_entry_idx, + HINIC3_MSIX_ENABLE); + + return 0; + +err_free_ceqs: + while (q_id > 0) { + q_id--; + remove_eq(&ceqs->ceq[q_id]); + } + + kfree(ceqs); + + return err; +} + +void hinic3_ceqs_free(struct hinic3_hwdev *hwdev) +{ + struct hinic3_ceqs *ceqs = hwdev->ceqs; + enum hinic3_ceq_event ceq_event; + struct hinic3_eq *eq; + u16 q_id; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { + eq = ceqs->ceq + q_id; + remove_eq(eq); + hinic3_free_irq(hwdev, eq->irq_id); + } + + for (ceq_event = 0; ceq_event < HINIC3_MAX_CEQ_EVENTS; ceq_event++) + hinic3_ceq_unregister_cb(hwdev, ceq_event); + + kfree(ceqs); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h new file mode 100644 index 000000000000..005a6e0745b3 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_EQS_H_ +#define _HINIC3_EQS_H_ + +#include <linux/interrupt.h> + +#include "hinic3_hw_cfg.h" +#include "hinic3_queue_common.h" + +#define HINIC3_MAX_AEQS 4 +#define HINIC3_MAX_CEQS 32 + +#define HINIC3_AEQ_MAX_PAGES 4 +#define HINIC3_CEQ_MAX_PAGES 8 + +#define HINIC3_AEQE_SIZE 64 +#define HINIC3_CEQE_SIZE 4 + +#define HINIC3_AEQE_DESC_SIZE 4 +#define HINIC3_AEQE_DATA_SIZE (HINIC3_AEQE_SIZE - HINIC3_AEQE_DESC_SIZE) + +#define HINIC3_DEFAULT_AEQ_LEN 0x10000 +#define HINIC3_DEFAULT_CEQ_LEN 0x10000 + +#define HINIC3_EQ_IRQ_NAME_LEN 64 + +#define HINIC3_EQ_USLEEP_LOW_BOUND 900 +#define HINIC3_EQ_USLEEP_HIGH_BOUND 1000 + +enum hinic3_eq_type { + HINIC3_AEQ = 0, + HINIC3_CEQ = 1, +}; + +enum hinic3_eq_intr_mode { + HINIC3_INTR_MODE_ARMED = 0, + HINIC3_INTR_MODE_ALWAYS = 1, +}; + +enum hinic3_eq_ci_arm_state { + HINIC3_EQ_NOT_ARMED = 0, + HINIC3_EQ_ARMED = 1, +}; + +struct hinic3_eq { + struct hinic3_hwdev *hwdev; + struct hinic3_queue_pages qpages; + u16 q_id; + enum hinic3_eq_type type; + u32 eq_len; + u32 cons_idx; + u8 wrapped; + u32 irq_id; + u16 msix_entry_idx; + char irq_name[HINIC3_EQ_IRQ_NAME_LEN]; + struct work_struct aeq_work; +}; + +struct hinic3_aeq_elem { + u8 aeqe_data[HINIC3_AEQE_DATA_SIZE]; + __be32 desc; +}; + +enum hinic3_aeq_type { + HINIC3_HW_INTER_INT = 0, + HINIC3_MBX_FROM_FUNC = 1, + HINIC3_MSG_FROM_FW = 2, + HINIC3_MAX_AEQ_EVENTS = 6, +}; + +typedef void (*hinic3_aeq_event_cb)(struct hinic3_hwdev *hwdev, u8 *data, + u8 size); + +struct hinic3_aeqs { + struct hinic3_hwdev *hwdev; + hinic3_aeq_event_cb aeq_cb[HINIC3_MAX_AEQ_EVENTS]; + struct hinic3_eq aeq[HINIC3_MAX_AEQS]; + u16 num_aeqs; + struct workqueue_struct *workq; + /* lock for aeq event flag */ + spinlock_t aeq_lock; +}; + +enum hinic3_ceq_event { + HINIC3_CMDQ = 3, + HINIC3_MAX_CEQ_EVENTS = 6, +}; + +typedef void (*hinic3_ceq_event_cb)(struct hinic3_hwdev *hwdev, + __le32 ceqe_data); + +struct hinic3_ceqs { + struct hinic3_hwdev *hwdev; + + hinic3_ceq_event_cb ceq_cb[HINIC3_MAX_CEQ_EVENTS]; + + struct hinic3_eq ceq[HINIC3_MAX_CEQS]; + u16 num_ceqs; + /* lock for ceq event flag */ + spinlock_t ceq_lock; +}; + +int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs, + struct msix_entry *msix_entries); +void hinic3_aeqs_free(struct hinic3_hwdev *hwdev); +int hinic3_aeq_register_cb(struct hinic3_hwdev *hwdev, + enum hinic3_aeq_type event, + hinic3_aeq_event_cb hwe_cb); +void hinic3_aeq_unregister_cb(struct hinic3_hwdev *hwdev, + enum hinic3_aeq_type event); +int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs, + struct msix_entry *msix_entries); +void hinic3_ceqs_free(struct hinic3_hwdev *hwdev); +int hinic3_ceq_register_cb(struct hinic3_hwdev *hwdev, + enum hinic3_ceq_event event, + hinic3_ceq_event_cb callback); +void hinic3_ceq_unregister_cb(struct hinic3_hwdev *hwdev, + enum hinic3_ceq_event event); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c index 87d9450c30ca..7827c1f626db 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c @@ -8,6 +8,217 @@ #include "hinic3_hwif.h" #include "hinic3_mbox.h" +#define HINIC3_CFG_MAX_QP 256 + +static void hinic3_parse_pub_res_cap(struct hinic3_hwdev *hwdev, + struct hinic3_dev_cap *cap, + const struct cfg_cmd_dev_cap *dev_cap, + enum hinic3_func_type type) +{ + cap->port_id = dev_cap->port_id; + cap->supp_svcs_bitmap = dev_cap->svc_cap_en; +} + +static void hinic3_parse_l2nic_res_cap(struct 
hinic3_hwdev *hwdev, + struct hinic3_dev_cap *cap, + const struct cfg_cmd_dev_cap *dev_cap, + enum hinic3_func_type type) +{ + struct hinic3_nic_service_cap *nic_svc_cap = &cap->nic_svc_cap; + + nic_svc_cap->max_sqs = min(dev_cap->nic_max_sq_id + 1, + HINIC3_CFG_MAX_QP); +} + +static void hinic3_parse_dev_cap(struct hinic3_hwdev *hwdev, + const struct cfg_cmd_dev_cap *dev_cap, + enum hinic3_func_type type) +{ + struct hinic3_dev_cap *cap = &hwdev->cfg_mgmt->cap; + + /* Public resource */ + hinic3_parse_pub_res_cap(hwdev, cap, dev_cap, type); + + /* L2 NIC resource */ + if (hinic3_support_nic(hwdev)) + hinic3_parse_l2nic_res_cap(hwdev, cap, dev_cap, type); +} + +static int get_cap_from_fw(struct hinic3_hwdev *hwdev, + enum hinic3_func_type type) +{ + struct mgmt_msg_params msg_params = {}; + struct cfg_cmd_dev_cap dev_cap = {}; + int err; + + dev_cap.func_id = hinic3_global_func_id(hwdev); + + mgmt_msg_params_init_default(&msg_params, &dev_cap, sizeof(dev_cap)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_CFGM, + CFG_CMD_GET_DEV_CAP, &msg_params); + if (err || dev_cap.head.status) { + dev_err(hwdev->dev, + "Failed to get capability from FW, err: %d, status: 0x%x\n", + err, dev_cap.head.status); + return -EIO; + } + + hinic3_parse_dev_cap(hwdev, &dev_cap, type); + + return 0; +} + +static int hinic3_init_irq_info(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt; + struct hinic3_hwif *hwif = hwdev->hwif; + u16 intr_num = hwif->attr.num_irqs; + struct hinic3_irq_info *irq_info; + u16 intr_needed; + + intr_needed = hwif->attr.msix_flex_en ? (hwif->attr.num_aeqs + + hwif->attr.num_ceqs + hwif->attr.num_sq) : intr_num; + if (intr_needed > intr_num) { + dev_warn(hwdev->dev, "Irq num cfg %d is less than the needed irq num %d msix_flex_en %d\n", + intr_num, intr_needed, hwdev->hwif->attr.msix_flex_en); + intr_needed = intr_num; + } + + irq_info = &cfg_mgmt->irq_info; + irq_info->irq = kcalloc(intr_num, sizeof(struct hinic3_irq), + GFP_KERNEL); + if (!irq_info->irq) + return -ENOMEM; + + irq_info->num_irq_hw = intr_needed; + mutex_init(&irq_info->irq_mutex); + + return 0; +} + +static int hinic3_init_irq_alloc_info(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt; + struct hinic3_irq *irq = cfg_mgmt->irq_info.irq; + u16 nreq = cfg_mgmt->irq_info.num_irq_hw; + struct pci_dev *pdev = hwdev->pdev; + int actual_irq; + u16 i; + + actual_irq = pci_alloc_irq_vectors(pdev, 2, nreq, PCI_IRQ_MSIX); + if (actual_irq < 0) { + dev_err(hwdev->dev, "Alloc msix entries with threshold 2 failed. 
actual_irq: %d\n", + actual_irq); + return -ENOMEM; + } + + nreq = actual_irq; + cfg_mgmt->irq_info.num_irq = nreq; + + for (i = 0; i < nreq; ++i) { + irq[i].msix_entry_idx = i; + irq[i].irq_id = pci_irq_vector(pdev, i); + irq[i].allocated = false; + } + + return 0; +} + +int hinic3_init_cfg_mgmt(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cfg_mgmt_info *cfg_mgmt; + int err; + + cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL); + if (!cfg_mgmt) + return -ENOMEM; + + hwdev->cfg_mgmt = cfg_mgmt; + + err = hinic3_init_irq_info(hwdev); + if (err) { + dev_err(hwdev->dev, "Failed to init hinic3_irq_mgmt_info, err: %d\n", + err); + goto err_free_cfg_mgmt; + } + + err = hinic3_init_irq_alloc_info(hwdev); + if (err) { + dev_err(hwdev->dev, "Failed to init hinic3_irq_info, err: %d\n", + err); + goto err_free_irq_info; + } + + return 0; + +err_free_irq_info: + kfree(cfg_mgmt->irq_info.irq); + cfg_mgmt->irq_info.irq = NULL; +err_free_cfg_mgmt: + kfree(cfg_mgmt); + + return err; +} + +void hinic3_free_cfg_mgmt(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt; + + pci_free_irq_vectors(hwdev->pdev); + kfree(cfg_mgmt->irq_info.irq); + cfg_mgmt->irq_info.irq = NULL; + kfree(cfg_mgmt); +} + +int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num, + struct msix_entry *alloc_arr, u16 *act_num) +{ + struct hinic3_irq_info *irq_info; + struct hinic3_irq *curr; + u16 i, found = 0; + + irq_info = &hwdev->cfg_mgmt->irq_info; + mutex_lock(&irq_info->irq_mutex); + for (i = 0; i < irq_info->num_irq && found < num; i++) { + curr = irq_info->irq + i; + if (curr->allocated) + continue; + curr->allocated = true; + alloc_arr[found].vector = curr->irq_id; + alloc_arr[found].entry = curr->msix_entry_idx; + found++; + } + mutex_unlock(&irq_info->irq_mutex); + + *act_num = found; + + return found == 0 ? 
-ENOMEM : 0; +} + +void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id) +{ + struct hinic3_irq_info *irq_info; + struct hinic3_irq *curr; + u16 i; + + irq_info = &hwdev->cfg_mgmt->irq_info; + mutex_lock(&irq_info->irq_mutex); + for (i = 0; i < irq_info->num_irq; i++) { + curr = irq_info->irq + i; + if (curr->irq_id == irq_id) { + curr->allocated = false; + break; + } + } + mutex_unlock(&irq_info->irq_mutex); +} + +int hinic3_init_capability(struct hinic3_hwdev *hwdev) +{ + return get_cap_from_fw(hwdev, HINIC3_FUNC_TYPE_VF); +} + bool hinic3_support_nic(struct hinic3_hwdev *hwdev) { return hwdev->cfg_mgmt->cap.supp_svcs_bitmap & diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h index e017b1ae9f05..58806199bf54 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h @@ -42,10 +42,14 @@ struct hinic3_cfg_mgmt_info { struct hinic3_dev_cap cap; }; +int hinic3_init_cfg_mgmt(struct hinic3_hwdev *hwdev); +void hinic3_free_cfg_mgmt(struct hinic3_hwdev *hwdev); + int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num, struct msix_entry *alloc_arr, u16 *act_num); void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id); +int hinic3_init_capability(struct hinic3_hwdev *hwdev); bool hinic3_support_nic(struct hinic3_hwdev *hwdev); u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev); u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev); diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c index 434696ce7dc2..89638813df40 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c @@ -3,11 +3,43 @@ #include <linux/delay.h> +#include "hinic3_cmdq.h" #include "hinic3_hw_comm.h" #include "hinic3_hwdev.h" #include "hinic3_hwif.h" #include "hinic3_mbox.h" +int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev, + const struct hinic3_interrupt_info *info) +{ + struct comm_cmd_cfg_msix_ctrl_reg msix_cfg = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + msix_cfg.func_id = hinic3_global_func_id(hwdev); + msix_cfg.msix_index = info->msix_index; + msix_cfg.opcode = MGMT_MSG_CMD_OP_SET; + + msix_cfg.lli_credit_cnt = info->lli_credit_limit; + msix_cfg.lli_timer_cnt = info->lli_timer_cfg; + msix_cfg.pending_cnt = info->pending_limit; + msix_cfg.coalesce_timer_cnt = info->coalesc_timer_cfg; + msix_cfg.resend_timer_cnt = info->resend_timer_cfg; + + mgmt_msg_params_init_default(&msg_params, &msix_cfg, sizeof(msix_cfg)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, + COMM_CMD_CFG_MSIX_CTRL_REG, &msg_params); + if (err || msix_cfg.head.status) { + dev_err(hwdev->dev, + "Failed to set interrupt config, err: %d, status: 0x%x\n", + err, msix_cfg.head.status); + return -EINVAL; + } + + return 0; +} + int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag) { struct comm_cmd_func_reset func_reset = {}; @@ -30,3 +62,365 @@ int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag) return 0; } + +static int hinic3_comm_features_nego(struct hinic3_hwdev *hwdev, u8 opcode, + u64 *s_feature, u16 size) +{ + struct comm_cmd_feature_nego feature_nego = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + feature_nego.func_id = hinic3_global_func_id(hwdev); + feature_nego.opcode = opcode; + if (opcode == MGMT_MSG_CMD_OP_SET) + memcpy(feature_nego.s_feature, s_feature, + 
array_size(size, sizeof(u64))); + + mgmt_msg_params_init_default(&msg_params, &feature_nego, + sizeof(feature_nego)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, + COMM_CMD_FEATURE_NEGO, &msg_params); + if (err || feature_nego.head.status) { + dev_err(hwdev->dev, "Failed to negotiate feature, err: %d, status: 0x%x\n", + err, feature_nego.head.status); + return -EINVAL; + } + + if (opcode == MGMT_MSG_CMD_OP_GET) + memcpy(s_feature, feature_nego.s_feature, + array_size(size, sizeof(u64))); + + return 0; +} + +int hinic3_get_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature, + u16 size) +{ + return hinic3_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_GET, s_feature, + size); +} + +int hinic3_set_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature, + u16 size) +{ + return hinic3_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_SET, s_feature, + size); +} + +int hinic3_get_global_attr(struct hinic3_hwdev *hwdev, + struct comm_global_attr *attr) +{ + struct comm_cmd_get_glb_attr get_attr = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + mgmt_msg_params_init_default(&msg_params, &get_attr, sizeof(get_attr)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, + COMM_CMD_GET_GLOBAL_ATTR, &msg_params); + if (err || get_attr.head.status) { + dev_err(hwdev->dev, + "Failed to get global attribute, err: %d, status: 0x%x\n", + err, get_attr.head.status); + return -EIO; + } + + memcpy(attr, &get_attr.attr, sizeof(*attr)); + + return 0; +} + +int hinic3_set_func_svc_used_state(struct hinic3_hwdev *hwdev, u16 svc_type, + u8 state) +{ + struct comm_cmd_set_func_svc_used_state used_state = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + used_state.func_id = hinic3_global_func_id(hwdev); + used_state.svc_type = svc_type; + used_state.used_state = state; + + mgmt_msg_params_init_default(&msg_params, &used_state, + sizeof(used_state)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, + COMM_CMD_SET_FUNC_SVC_USED_STATE, + &msg_params); + if (err || used_state.head.status) { + dev_err(hwdev->dev, + "Failed to set func service used state, err: %d, status: 0x%x\n", + err, used_state.head.status); + return -EIO; + } + + return 0; +} + +int hinic3_set_dma_attr_tbl(struct hinic3_hwdev *hwdev, u8 entry_idx, u8 st, + u8 at, u8 ph, u8 no_snooping, u8 tph_en) +{ + struct comm_cmd_set_dma_attr dma_attr = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + dma_attr.func_id = hinic3_global_func_id(hwdev); + dma_attr.entry_idx = entry_idx; + dma_attr.st = st; + dma_attr.at = at; + dma_attr.ph = ph; + dma_attr.no_snooping = no_snooping; + dma_attr.tph_en = tph_en; + + mgmt_msg_params_init_default(&msg_params, &dma_attr, sizeof(dma_attr)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, + COMM_CMD_SET_DMA_ATTR, &msg_params); + if (err || dma_attr.head.status) { + dev_err(hwdev->dev, "Failed to set dma attr, err: %d, status: 0x%x\n", + err, dma_attr.head.status); + return -EIO; + } + + return 0; +} + +int hinic3_set_wq_page_size(struct hinic3_hwdev *hwdev, u16 func_idx, + u32 page_size) +{ + struct comm_cmd_cfg_wq_page_size page_size_info = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + page_size_info.func_id = func_idx; + page_size_info.page_size = ilog2(page_size / HINIC3_MIN_PAGE_SIZE); + page_size_info.opcode = MGMT_MSG_CMD_OP_SET; + + mgmt_msg_params_init_default(&msg_params, &page_size_info, + sizeof(page_size_info)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, + COMM_CMD_CFG_PAGESIZE, &msg_params); + if (err || 
page_size_info.head.status) { + dev_err(hwdev->dev, + "Failed to set wq page size, err: %d, status: 0x%x\n", + err, page_size_info.head.status); + return -EFAULT; + } + + return 0; +} + +int hinic3_set_cmdq_depth(struct hinic3_hwdev *hwdev, u16 cmdq_depth) +{ + struct comm_cmd_set_root_ctxt root_ctxt = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + root_ctxt.func_id = hinic3_global_func_id(hwdev); + + root_ctxt.set_cmdq_depth = 1; + root_ctxt.cmdq_depth = ilog2(cmdq_depth); + + mgmt_msg_params_init_default(&msg_params, &root_ctxt, + sizeof(root_ctxt)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, + COMM_CMD_SET_VAT, &msg_params); + if (err || root_ctxt.head.status) { + dev_err(hwdev->dev, + "Failed to set cmdq depth, err: %d, status: 0x%x\n", + err, root_ctxt.head.status); + return -EFAULT; + } + + return 0; +} + +#define HINIC3_WAIT_CMDQ_IDLE_TIMEOUT 5000 + +static enum hinic3_wait_return check_cmdq_stop_handler(void *priv_data) +{ + struct hinic3_hwdev *hwdev = priv_data; + enum hinic3_cmdq_type cmdq_type; + struct hinic3_cmdqs *cmdqs; + + cmdqs = hwdev->cmdqs; + for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + if (!hinic3_cmdq_idle(&cmdqs->cmdq[cmdq_type])) + return HINIC3_WAIT_PROCESS_WAITING; + } + + return HINIC3_WAIT_PROCESS_CPL; +} + +static int wait_cmdq_stop(struct hinic3_hwdev *hwdev) +{ + struct hinic3_cmdqs *cmdqs = hwdev->cmdqs; + enum hinic3_cmdq_type cmdq_type; + int err; + + if (!(cmdqs->status & HINIC3_CMDQ_ENABLE)) + return 0; + + cmdqs->status &= ~HINIC3_CMDQ_ENABLE; + err = hinic3_wait_for_timeout(hwdev, check_cmdq_stop_handler, + HINIC3_WAIT_CMDQ_IDLE_TIMEOUT, + USEC_PER_MSEC); + + if (err) + goto err_reenable_cmdq; + + return 0; + +err_reenable_cmdq: + for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + if (!hinic3_cmdq_idle(&cmdqs->cmdq[cmdq_type])) + dev_err(hwdev->dev, "Cmdq %d is busy\n", cmdq_type); + } + cmdqs->status |= HINIC3_CMDQ_ENABLE; + + return err; +} + +int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev) +{ + struct comm_cmd_clear_resource clear_db = {}; + struct comm_cmd_clear_resource clr_res = {}; + struct hinic3_hwif *hwif = hwdev->hwif; + struct mgmt_msg_params msg_params = {}; + int ret = 0; + int err; + + err = wait_cmdq_stop(hwdev); + if (err) { + dev_warn(hwdev->dev, "CMDQ is still working, CMDQ timeout value is unreasonable\n"); + ret = err; + } + + hinic3_toggle_doorbell(hwif, DISABLE_DOORBELL); + + clear_db.func_id = hwif->attr.func_global_idx; + mgmt_msg_params_init_default(&msg_params, &clear_db, sizeof(clear_db)); + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, + COMM_CMD_FLUSH_DOORBELL, &msg_params); + if (err || clear_db.head.status) { + dev_warn(hwdev->dev, "Failed to flush doorbell, err: %d, status: 0x%x\n", + err, clear_db.head.status); + if (err) + ret = err; + else + ret = -EFAULT; + } + + clr_res.func_id = hwif->attr.func_global_idx; + msg_params.buf_in = &clr_res; + msg_params.in_size = sizeof(clr_res); + err = hinic3_send_mbox_to_mgmt_no_ack(hwdev, MGMT_MOD_COMM, + COMM_CMD_START_FLUSH, + &msg_params); + if (err) { + dev_warn(hwdev->dev, "Failed to notice flush message, err: %d\n", + err); + ret = err; + } + + hinic3_toggle_doorbell(hwif, ENABLE_DOORBELL); + + err = hinic3_reinit_cmdq_ctxts(hwdev); + if (err) { + dev_warn(hwdev->dev, "Failed to reinit cmdq\n"); + ret = err; + } + + return ret; +} + +static int get_hw_rx_buf_size_idx(int rx_buf_sz, u16 *buf_sz_idx) +{ + /* Supported RX buffer sizes in bytes. Configured by array index. 
*/ + static const int supported_sizes[16] = { + [0] = 32, [1] = 64, [2] = 96, [3] = 128, + [4] = 192, [5] = 256, [6] = 384, [7] = 512, + [8] = 768, [9] = 1024, [10] = 1536, [11] = 2048, + [12] = 3072, [13] = 4096, [14] = 8192, [15] = 16384, + }; + u16 idx; + + /* Scan from biggest to smallest. Choose supported size that is equal or + * smaller. For smaller value HW will under-utilize posted buffers. For + * bigger value HW may overrun posted buffers. + */ + idx = ARRAY_SIZE(supported_sizes); + while (idx > 0) { + idx--; + if (supported_sizes[idx] <= rx_buf_sz) { + *buf_sz_idx = idx; + return 0; + } + } + + return -EINVAL; +} + +int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth, + int rx_buf_sz) +{ + struct comm_cmd_set_root_ctxt root_ctxt = {}; + struct mgmt_msg_params msg_params = {}; + u16 buf_sz_idx; + int err; + + err = get_hw_rx_buf_size_idx(rx_buf_sz, &buf_sz_idx); + if (err) + return err; + + root_ctxt.func_id = hinic3_global_func_id(hwdev); + + root_ctxt.set_cmdq_depth = 0; + root_ctxt.cmdq_depth = 0; + + root_ctxt.lro_en = 1; + + root_ctxt.rq_depth = ilog2(rq_depth); + root_ctxt.rx_buf_sz = buf_sz_idx; + root_ctxt.sq_depth = ilog2(sq_depth); + + mgmt_msg_params_init_default(&msg_params, &root_ctxt, + sizeof(root_ctxt)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, + COMM_CMD_SET_VAT, &msg_params); + if (err || root_ctxt.head.status) { + dev_err(hwdev->dev, + "Failed to set root context, err: %d, status: 0x%x\n", + err, root_ctxt.head.status); + return -EFAULT; + } + + return 0; +} + +int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev) +{ + struct comm_cmd_set_root_ctxt root_ctxt = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + root_ctxt.func_id = hinic3_global_func_id(hwdev); + + mgmt_msg_params_init_default(&msg_params, &root_ctxt, + sizeof(root_ctxt)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, + COMM_CMD_SET_VAT, &msg_params); + if (err || root_ctxt.head.status) { + dev_err(hwdev->dev, + "Failed to set root context, err: %d, status: 0x%x\n", + err, root_ctxt.head.status); + return -EFAULT; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h index c33a1c77da9c..304f5691f0c2 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h @@ -8,6 +8,40 @@ struct hinic3_hwdev; +#define HINIC3_WQ_PAGE_SIZE_ORDER 8 + +struct hinic3_interrupt_info { + u32 lli_set; + u32 interrupt_coalesc_set; + u16 msix_index; + u8 lli_credit_limit; + u8 lli_timer_cfg; + u8 pending_limit; + u8 coalesc_timer_cfg; + u8 resend_timer_cfg; +}; + +int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev, + const struct hinic3_interrupt_info *info); int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag); +int hinic3_get_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature, + u16 size); +int hinic3_set_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature, + u16 size); +int hinic3_get_global_attr(struct hinic3_hwdev *hwdev, + struct comm_global_attr *attr); +int hinic3_set_func_svc_used_state(struct hinic3_hwdev *hwdev, u16 svc_type, + u8 state); +int hinic3_set_dma_attr_tbl(struct hinic3_hwdev *hwdev, u8 entry_idx, u8 st, + u8 at, u8 ph, u8 no_snooping, u8 tph_en); + +int hinic3_set_wq_page_size(struct hinic3_hwdev *hwdev, u16 func_idx, + u32 page_size); +int hinic3_set_cmdq_depth(struct hinic3_hwdev *hwdev, u16 cmdq_depth); +int 
hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev); +int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth, + int rx_buf_sz); +int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev); + #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h index 22c84093efa2..623cf2d14cbc 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h @@ -51,6 +51,48 @@ static inline void mgmt_msg_params_init_default(struct mgmt_msg_params *msg_para msg_params->timeout_ms = 0; } +enum cfg_cmd { + CFG_CMD_GET_DEV_CAP = 0, +}; + +/* Device capabilities, defined by hw */ +struct cfg_cmd_dev_cap { + struct mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + + /* Public resources */ + u8 host_id; + u8 ep_id; + u8 er_id; + u8 port_id; + + u16 host_total_func; + u8 host_pf_num; + u8 pf_id_start; + u16 host_vf_num; + u16 vf_id_start; + u8 host_oq_id_mask_val; + u8 timer_en; + u8 host_valid_bitmap; + u8 rsvd_host; + + u16 svc_cap_en; + u16 max_vf; + u8 flexq_en; + u8 valid_cos_bitmap; + u8 port_cos_valid_bitmap; + u8 rsvd2[45]; + + /* l2nic */ + u16 nic_max_sq_id; + u16 nic_max_rq_id; + u16 nic_default_num_queues; + + u8 rsvd3[250]; +}; + /* COMM Commands between Driver to fw */ enum comm_cmd { /* Commands for clearing FLR and resources */ @@ -70,6 +112,20 @@ enum comm_cmd { COMM_CMD_SET_DMA_ATTR = 25, }; +struct comm_cmd_cfg_msix_ctrl_reg { + struct mgmt_msg_head head; + u16 func_id; + u8 opcode; + u8 rsvd1; + u16 msix_index; + u8 pending_cnt; + u8 coalesce_timer_cnt; + u8 resend_timer_cnt; + u8 lli_timer_cnt; + u8 lli_credit_cnt; + u8 rsvd2[5]; +}; + enum comm_func_reset_bits { COMM_FUNC_RESET_BIT_FLUSH = BIT(0), COMM_FUNC_RESET_BIT_MQM = BIT(1), @@ -84,6 +140,11 @@ enum comm_func_reset_bits { COMM_FUNC_RESET_BIT_NIC = BIT(13), }; +#define COMM_FUNC_RESET_FLAG \ + (COMM_FUNC_RESET_BIT_COMM | COMM_FUNC_RESET_BIT_COMM_CMD_CH | \ + COMM_FUNC_RESET_BIT_FLUSH | COMM_FUNC_RESET_BIT_MQM | \ + COMM_FUNC_RESET_BIT_SMF | COMM_FUNC_RESET_BIT_PF_BW_CFG) + struct comm_cmd_func_reset { struct mgmt_msg_head head; u16 func_id; @@ -100,6 +161,96 @@ struct comm_cmd_feature_nego { u64 s_feature[COMM_MAX_FEATURE_QWORD]; }; +struct comm_global_attr { + u8 max_host_num; + u8 max_pf_num; + u16 vf_id_start; + /* for api cmd to mgmt cpu */ + u8 mgmt_host_node_id; + u8 cmdq_num; + u8 rsvd1[34]; +}; + +struct comm_cmd_get_glb_attr { + struct mgmt_msg_head head; + struct comm_global_attr attr; +}; + +enum comm_func_svc_type { + COMM_FUNC_SVC_T_COMM = 0, + COMM_FUNC_SVC_T_NIC = 1, +}; + +struct comm_cmd_set_func_svc_used_state { + struct mgmt_msg_head head; + u16 func_id; + u16 svc_type; + u8 used_state; + u8 rsvd[35]; +}; + +struct comm_cmd_set_dma_attr { + struct mgmt_msg_head head; + u16 func_id; + u8 entry_idx; + u8 st; + u8 at; + u8 ph; + u8 no_snooping; + u8 tph_en; + u32 resv1; +}; + +struct comm_cmd_set_ceq_ctrl_reg { + struct mgmt_msg_head head; + u16 func_id; + u16 q_id; + u32 ctrl0; + u32 ctrl1; + u32 rsvd1; +}; + +struct comm_cmd_cfg_wq_page_size { + struct mgmt_msg_head head; + u16 func_id; + u8 opcode; + /* real_size=4KB*2^page_size, range(0~20) must be checked by driver */ + u8 page_size; + u32 rsvd1; +}; + +struct comm_cmd_set_root_ctxt { + struct mgmt_msg_head head; + u16 func_id; + u8 set_cmdq_depth; + u8 cmdq_depth; + u16 rx_buf_sz; + u8 lro_en; + u8 rsvd1; + u16 sq_depth; + u16 rq_depth; + u64 rsvd2; +}; + +struct comm_cmdq_ctxt_info { + __le64 curr_wqe_page_pfn; + 
__le64 wq_block_pfn; +}; + +struct comm_cmd_set_cmdq_ctxt { + struct mgmt_msg_head head; + u16 func_id; + u8 cmdq_id; + u8 rsvd1[5]; + struct comm_cmdq_ctxt_info ctxt; +}; + +struct comm_cmd_clear_resource { + struct mgmt_msg_head head; + u16 func_id; + u16 rsvd1[3]; +}; + /* Services supported by HW. HW uses these values when delivering events. * HW supports multiple services that are not yet supported by driver * (e.g. RoCE). diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c index 6e8788a64925..95a213133be9 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c @@ -1,24 +1,557 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. +#include "hinic3_cmdq.h" +#include "hinic3_csr.h" +#include "hinic3_eqs.h" #include "hinic3_hw_comm.h" #include "hinic3_hwdev.h" #include "hinic3_hwif.h" #include "hinic3_mbox.h" #include "hinic3_mgmt.h" +#define HINIC3_PCIE_SNOOP 0 +#define HINIC3_PCIE_TPH_DISABLE 0 + +#define HINIC3_DMA_ATTR_INDIR_IDX_MASK GENMASK(9, 0) +#define HINIC3_DMA_ATTR_INDIR_IDX_SET(val, member) \ + FIELD_PREP(HINIC3_DMA_ATTR_INDIR_##member##_MASK, val) + +#define HINIC3_DMA_ATTR_ENTRY_ST_MASK GENMASK(7, 0) +#define HINIC3_DMA_ATTR_ENTRY_AT_MASK GENMASK(9, 8) +#define HINIC3_DMA_ATTR_ENTRY_PH_MASK GENMASK(11, 10) +#define HINIC3_DMA_ATTR_ENTRY_NO_SNOOPING_MASK BIT(12) +#define HINIC3_DMA_ATTR_ENTRY_TPH_EN_MASK BIT(13) +#define HINIC3_DMA_ATTR_ENTRY_SET(val, member) \ + FIELD_PREP(HINIC3_DMA_ATTR_ENTRY_##member##_MASK, val) + +#define HINIC3_PCIE_ST_DISABLE 0 +#define HINIC3_PCIE_AT_DISABLE 0 +#define HINIC3_PCIE_PH_DISABLE 0 +#define HINIC3_PCIE_MSIX_ATTR_ENTRY 0 + +#define HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT 0 +#define HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF +#define HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG 7 + +#define HINIC3_HWDEV_WQ_NAME "hinic3_hardware" +#define HINIC3_WQ_MAX_REQ 10 + +enum hinic3_hwdev_init_state { + HINIC3_HWDEV_MBOX_INITED = 2, + HINIC3_HWDEV_CMDQ_INITED = 3, +}; + +static int hinic3_comm_aeqs_init(struct hinic3_hwdev *hwdev) +{ + struct msix_entry aeq_msix_entries[HINIC3_MAX_AEQS]; + u16 num_aeqs, resp_num_irq, i; + int err; + + num_aeqs = hwdev->hwif->attr.num_aeqs; + if (num_aeqs > HINIC3_MAX_AEQS) { + dev_warn(hwdev->dev, "Adjust aeq num to %d\n", + HINIC3_MAX_AEQS); + num_aeqs = HINIC3_MAX_AEQS; + } + err = hinic3_alloc_irqs(hwdev, num_aeqs, aeq_msix_entries, + &resp_num_irq); + if (err) { + dev_err(hwdev->dev, "Failed to alloc aeq irqs, num_aeqs: %u\n", + num_aeqs); + return err; + } + + if (resp_num_irq < num_aeqs) { + dev_warn(hwdev->dev, "Adjust aeq num to %u\n", + resp_num_irq); + num_aeqs = resp_num_irq; + } + + err = hinic3_aeqs_init(hwdev, num_aeqs, aeq_msix_entries); + if (err) { + dev_err(hwdev->dev, "Failed to init aeqs\n"); + goto err_free_irqs; + } + + return 0; + +err_free_irqs: + for (i = 0; i < num_aeqs; i++) + hinic3_free_irq(hwdev, aeq_msix_entries[i].vector); + + return err; +} + +static int hinic3_comm_ceqs_init(struct hinic3_hwdev *hwdev) +{ + struct msix_entry ceq_msix_entries[HINIC3_MAX_CEQS]; + u16 num_ceqs, resp_num_irq, i; + int err; + + num_ceqs = hwdev->hwif->attr.num_ceqs; + if (num_ceqs > HINIC3_MAX_CEQS) { + dev_warn(hwdev->dev, "Adjust ceq num to %d\n", + HINIC3_MAX_CEQS); + num_ceqs = HINIC3_MAX_CEQS; + } + + err = hinic3_alloc_irqs(hwdev, num_ceqs, ceq_msix_entries, + &resp_num_irq); + if (err) { + dev_err(hwdev->dev, "Failed to 
alloc ceq irqs, num_ceqs: %u\n", + num_ceqs); + return err; + } + + if (resp_num_irq < num_ceqs) { + dev_warn(hwdev->dev, "Adjust ceq num to %u\n", + resp_num_irq); + num_ceqs = resp_num_irq; + } + + err = hinic3_ceqs_init(hwdev, num_ceqs, ceq_msix_entries); + if (err) { + dev_err(hwdev->dev, + "Failed to init ceqs, err:%d\n", err); + goto err_free_irqs; + } + + return 0; + +err_free_irqs: + for (i = 0; i < num_ceqs; i++) + hinic3_free_irq(hwdev, ceq_msix_entries[i].vector); + + return err; +} + +static int hinic3_comm_mbox_init(struct hinic3_hwdev *hwdev) +{ + int err; + + err = hinic3_init_mbox(hwdev); + if (err) + return err; + + hinic3_aeq_register_cb(hwdev, HINIC3_MBX_FROM_FUNC, + hinic3_mbox_func_aeqe_handler); + hinic3_aeq_register_cb(hwdev, HINIC3_MSG_FROM_FW, + hinic3_mgmt_msg_aeqe_handler); + + set_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state); + + return 0; +} + +static void hinic3_comm_mbox_free(struct hinic3_hwdev *hwdev) +{ + spin_lock_bh(&hwdev->channel_lock); + clear_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + hinic3_aeq_unregister_cb(hwdev, HINIC3_MBX_FROM_FUNC); + hinic3_aeq_unregister_cb(hwdev, HINIC3_MSG_FROM_FW); + hinic3_free_mbox(hwdev); +} + +static int init_aeqs_msix_attr(struct hinic3_hwdev *hwdev) +{ + struct hinic3_aeqs *aeqs = hwdev->aeqs; + struct hinic3_interrupt_info info = {}; + struct hinic3_eq *eq; + u16 q_id; + int err; + + info.interrupt_coalesc_set = 1; + info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) { + eq = &aeqs->aeq[q_id]; + info.msix_index = eq->msix_entry_idx; + err = hinic3_set_interrupt_cfg_direct(hwdev, &info); + if (err) { + dev_err(hwdev->dev, "Set msix attr for aeq %d failed\n", + q_id); + return err; + } + } + + return 0; +} + +static int init_ceqs_msix_attr(struct hinic3_hwdev *hwdev) +{ + struct hinic3_ceqs *ceqs = hwdev->ceqs; + struct hinic3_interrupt_info info = {}; + struct hinic3_eq *eq; + u16 q_id; + int err; + + info.interrupt_coalesc_set = 1; + info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { + eq = &ceqs->ceq[q_id]; + info.msix_index = eq->msix_entry_idx; + err = hinic3_set_interrupt_cfg_direct(hwdev, &info); + if (err) { + dev_err(hwdev->dev, "Set msix attr for ceq %u failed\n", + q_id); + return err; + } + } + + return 0; +} + +static int init_basic_mgmt_channel(struct hinic3_hwdev *hwdev) +{ + int err; + + err = hinic3_comm_aeqs_init(hwdev); + if (err) { + dev_err(hwdev->dev, "Failed to init async event queues\n"); + return err; + } + + err = hinic3_comm_mbox_init(hwdev); + if (err) { + dev_err(hwdev->dev, "Failed to init mailbox\n"); + goto err_free_comm_aeqs; + } + + err = init_aeqs_msix_attr(hwdev); + if (err) { + dev_err(hwdev->dev, "Failed to init aeqs msix attr\n"); + goto err_free_comm_mbox; + } + + return 0; + +err_free_comm_mbox: + hinic3_comm_mbox_free(hwdev); +err_free_comm_aeqs: + hinic3_aeqs_free(hwdev); + + return err; +} + +static void free_base_mgmt_channel(struct hinic3_hwdev *hwdev) +{ + hinic3_comm_mbox_free(hwdev); + hinic3_aeqs_free(hwdev); +} + +static int dma_attr_table_init(struct hinic3_hwdev *hwdev) +{ + u32 addr, val, dst_attr; + + /* Indirect access, set 
entry_idx first */ + addr = HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR; + val = hinic3_hwif_read_reg(hwdev->hwif, addr); + val &= ~HINIC3_DMA_ATTR_ENTRY_AT_MASK; + val |= HINIC3_DMA_ATTR_INDIR_IDX_SET(HINIC3_PCIE_MSIX_ATTR_ENTRY, IDX); + hinic3_hwif_write_reg(hwdev->hwif, addr, val); + + addr = HINIC3_CSR_DMA_ATTR_TBL_ADDR; + val = hinic3_hwif_read_reg(hwdev->hwif, addr); + + dst_attr = HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_ST_DISABLE, ST) | + HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_AT_DISABLE, AT) | + HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_PH_DISABLE, PH) | + HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_SNOOP, NO_SNOOPING) | + HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_TPH_DISABLE, TPH_EN); + if (val == dst_attr) + return 0; + + return hinic3_set_dma_attr_tbl(hwdev, + HINIC3_PCIE_MSIX_ATTR_ENTRY, + HINIC3_PCIE_ST_DISABLE, + HINIC3_PCIE_AT_DISABLE, + HINIC3_PCIE_PH_DISABLE, + HINIC3_PCIE_SNOOP, + HINIC3_PCIE_TPH_DISABLE); +} + +static int init_basic_attributes(struct hinic3_hwdev *hwdev) +{ + struct comm_global_attr glb_attr; + int err; + + err = hinic3_func_reset(hwdev, hinic3_global_func_id(hwdev), + COMM_FUNC_RESET_FLAG); + if (err) + return err; + + err = hinic3_get_comm_features(hwdev, hwdev->features, + COMM_MAX_FEATURE_QWORD); + if (err) + return err; + + dev_dbg(hwdev->dev, "Comm hw features: 0x%llx\n", hwdev->features[0]); + + err = hinic3_get_global_attr(hwdev, &glb_attr); + if (err) + return err; + + err = hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 1); + if (err) + return err; + + err = dma_attr_table_init(hwdev); + if (err) + return err; + + hwdev->max_cmdq = min(glb_attr.cmdq_num, HINIC3_MAX_CMDQ_TYPES); + dev_dbg(hwdev->dev, + "global attribute: max_host: 0x%x, max_pf: 0x%x, vf_id_start: 0x%x, mgmt node id: 0x%x, cmdq_num: 0x%x\n", + glb_attr.max_host_num, glb_attr.max_pf_num, + glb_attr.vf_id_start, glb_attr.mgmt_host_node_id, + glb_attr.cmdq_num); + + return 0; +} + +static int hinic3_comm_cmdqs_init(struct hinic3_hwdev *hwdev) +{ + int err; + + err = hinic3_cmdqs_init(hwdev); + if (err) { + dev_err(hwdev->dev, "Failed to init cmd queues\n"); + return err; + } + + hinic3_ceq_register_cb(hwdev, HINIC3_CMDQ, hinic3_cmdq_ceq_handler); + + err = hinic3_set_cmdq_depth(hwdev, CMDQ_DEPTH); + if (err) { + dev_err(hwdev->dev, "Failed to set cmdq depth\n"); + goto err_free_cmdqs; + } + + set_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state); + + return 0; + +err_free_cmdqs: + hinic3_cmdqs_free(hwdev); + + return err; +} + +static void hinic3_comm_cmdqs_free(struct hinic3_hwdev *hwdev) +{ + spin_lock_bh(&hwdev->channel_lock); + clear_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + hinic3_ceq_unregister_cb(hwdev, HINIC3_CMDQ); + hinic3_cmdqs_free(hwdev); +} + +static int init_cmdqs_channel(struct hinic3_hwdev *hwdev) +{ + int err; + + err = hinic3_comm_ceqs_init(hwdev); + if (err) { + dev_err(hwdev->dev, "Failed to init completion event queues\n"); + return err; + } + + err = init_ceqs_msix_attr(hwdev); + if (err) { + dev_err(hwdev->dev, "Failed to init ceqs msix attr\n"); + goto err_free_ceqs; + } + + hwdev->wq_page_size = HINIC3_MIN_PAGE_SIZE << HINIC3_WQ_PAGE_SIZE_ORDER; + err = hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev), + hwdev->wq_page_size); + if (err) { + dev_err(hwdev->dev, "Failed to set wq page size\n"); + goto err_free_ceqs; + } + + err = hinic3_comm_cmdqs_init(hwdev); + if (err) { + dev_err(hwdev->dev, "Failed to init cmd queues\n"); + goto err_reset_wq_page_size; + } + + return 0; + +err_reset_wq_page_size: + 
hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev), + HINIC3_MIN_PAGE_SIZE); +err_free_ceqs: + hinic3_ceqs_free(hwdev); + + return err; +} + +static void hinic3_free_cmdqs_channel(struct hinic3_hwdev *hwdev) +{ + hinic3_comm_cmdqs_free(hwdev); + hinic3_ceqs_free(hwdev); +} + +static int hinic3_init_comm_ch(struct hinic3_hwdev *hwdev) +{ + int err; + + err = init_basic_mgmt_channel(hwdev); + if (err) + return err; + + err = init_basic_attributes(hwdev); + if (err) + goto err_free_basic_mgmt_ch; + + err = init_cmdqs_channel(hwdev); + if (err) { + dev_err(hwdev->dev, "Failed to init cmdq channel\n"); + goto err_clear_func_svc_used_state; + } + + return 0; + +err_clear_func_svc_used_state: + hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0); +err_free_basic_mgmt_ch: + free_base_mgmt_channel(hwdev); + + return err; +} + +static void hinic3_uninit_comm_ch(struct hinic3_hwdev *hwdev) +{ + hinic3_free_cmdqs_channel(hwdev); + hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0); + free_base_mgmt_channel(hwdev); +} + +static DEFINE_IDA(hinic3_adev_ida); + +static int hinic3_adev_idx_alloc(void) +{ + return ida_alloc(&hinic3_adev_ida, GFP_KERNEL); +} + +static void hinic3_adev_idx_free(int id) +{ + ida_free(&hinic3_adev_ida, id); +} + int hinic3_init_hwdev(struct pci_dev *pdev) { - /* Completed by later submission due to LoC limit. */ - return -EFAULT; + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + struct hinic3_hwdev *hwdev; + int err; + + hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL); + if (!hwdev) + return -ENOMEM; + + pci_adapter->hwdev = hwdev; + hwdev->adapter = pci_adapter; + hwdev->pdev = pci_adapter->pdev; + hwdev->dev = &pci_adapter->pdev->dev; + hwdev->func_state = 0; + hwdev->dev_id = hinic3_adev_idx_alloc(); + spin_lock_init(&hwdev->channel_lock); + + err = hinic3_init_hwif(hwdev); + if (err) { + dev_err(hwdev->dev, "Failed to init hwif\n"); + goto err_free_hwdev; + } + + hwdev->workq = alloc_workqueue(HINIC3_HWDEV_WQ_NAME, WQ_MEM_RECLAIM, + HINIC3_WQ_MAX_REQ); + if (!hwdev->workq) { + dev_err(hwdev->dev, "Failed to alloc hardware workq\n"); + err = -ENOMEM; + goto err_free_hwif; + } + + err = hinic3_init_cfg_mgmt(hwdev); + if (err) { + dev_err(hwdev->dev, "Failed to init config mgmt\n"); + goto err_destroy_workqueue; + } + + err = hinic3_init_comm_ch(hwdev); + if (err) { + dev_err(hwdev->dev, "Failed to init communication channel\n"); + goto err_free_cfg_mgmt; + } + + err = hinic3_init_capability(hwdev); + if (err) { + dev_err(hwdev->dev, "Failed to init capability\n"); + goto err_uninit_comm_ch; + } + + err = hinic3_set_comm_features(hwdev, hwdev->features, + COMM_MAX_FEATURE_QWORD); + if (err) { + dev_err(hwdev->dev, "Failed to set comm features\n"); + goto err_uninit_comm_ch; + } + + return 0; + +err_uninit_comm_ch: + hinic3_uninit_comm_ch(hwdev); +err_free_cfg_mgmt: + hinic3_free_cfg_mgmt(hwdev); +err_destroy_workqueue: + destroy_workqueue(hwdev->workq); +err_free_hwif: + hinic3_free_hwif(hwdev); +err_free_hwdev: + pci_adapter->hwdev = NULL; + hinic3_adev_idx_free(hwdev->dev_id); + kfree(hwdev); + + return err; } void hinic3_free_hwdev(struct hinic3_hwdev *hwdev) { - /* Completed by later submission due to LoC limit. 
*/ + u64 drv_features[COMM_MAX_FEATURE_QWORD] = {}; + + hinic3_set_comm_features(hwdev, drv_features, COMM_MAX_FEATURE_QWORD); + hinic3_func_rx_tx_flush(hwdev); + hinic3_uninit_comm_ch(hwdev); + hinic3_free_cfg_mgmt(hwdev); + destroy_workqueue(hwdev->workq); + hinic3_free_hwif(hwdev); + hinic3_adev_idx_free(hwdev->dev_id); + kfree(hwdev); } void hinic3_set_api_stop(struct hinic3_hwdev *hwdev) { - /* Completed by later submission due to LoC limit. */ + struct hinic3_mbox *mbox; + + spin_lock_bh(&hwdev->channel_lock); + if (test_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state)) { + mbox = hwdev->mbox; + spin_lock(&mbox->mbox_lock); + if (mbox->event_flag == MBOX_EVENT_START) + mbox->event_flag = MBOX_EVENT_TIMEOUT; + spin_unlock(&mbox->mbox_lock); + } + + if (test_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state)) + hinic3_cmdq_flush_sync_cmd(hwdev); + + spin_unlock_bh(&hwdev->channel_lock); } diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c index 0865453bf0e7..f76f140fb6f7 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c @@ -6,13 +6,428 @@ #include <linux/io.h> #include "hinic3_common.h" +#include "hinic3_csr.h" #include "hinic3_hwdev.h" #include "hinic3_hwif.h" +#define HINIC3_HWIF_READY_TIMEOUT 10000 +#define HINIC3_DB_AND_OUTBOUND_EN_TIMEOUT 60000 +#define HINIC3_PCIE_LINK_DOWN 0xFFFFFFFF + +/* config BAR4/5 4MB, DB & DWQE both 2MB */ +#define HINIC3_DB_DWQE_SIZE 0x00400000 + +/* db/dwqe page size: 4K */ +#define HINIC3_DB_PAGE_SIZE 0x00001000 +#define HINIC3_DWQE_OFFSET 0x00000800 +#define HINIC3_DB_MAX_AREAS (HINIC3_DB_DWQE_SIZE / HINIC3_DB_PAGE_SIZE) + +#define HINIC3_MAX_MSIX_ENTRY 2048 + +#define HINIC3_AF0_FUNC_GLOBAL_IDX_MASK GENMASK(11, 0) +#define HINIC3_AF0_P2P_IDX_MASK GENMASK(16, 12) +#define HINIC3_AF0_PCI_INTF_IDX_MASK GENMASK(19, 17) +#define HINIC3_AF0_FUNC_TYPE_MASK BIT(28) +#define HINIC3_AF0_GET(val, member) \ + FIELD_GET(HINIC3_AF0_##member##_MASK, val) + +#define HINIC3_AF1_AEQS_PER_FUNC_MASK GENMASK(9, 8) +#define HINIC3_AF1_MGMT_INIT_STATUS_MASK BIT(30) +#define HINIC3_AF1_GET(val, member) \ + FIELD_GET(HINIC3_AF1_##member##_MASK, val) + +#define HINIC3_AF2_CEQS_PER_FUNC_MASK GENMASK(8, 0) +#define HINIC3_AF2_IRQS_PER_FUNC_MASK GENMASK(26, 16) +#define HINIC3_AF2_GET(val, member) \ + FIELD_GET(HINIC3_AF2_##member##_MASK, val) + +#define HINIC3_AF4_DOORBELL_CTRL_MASK BIT(0) +#define HINIC3_AF4_GET(val, member) \ + FIELD_GET(HINIC3_AF4_##member##_MASK, val) +#define HINIC3_AF4_SET(val, member) \ + FIELD_PREP(HINIC3_AF4_##member##_MASK, val) + +#define HINIC3_AF5_OUTBOUND_CTRL_MASK BIT(0) +#define HINIC3_AF5_GET(val, member) \ + FIELD_GET(HINIC3_AF5_##member##_MASK, val) + +#define HINIC3_AF6_PF_STATUS_MASK GENMASK(15, 0) +#define HINIC3_AF6_FUNC_MAX_SQ_MASK GENMASK(31, 23) +#define HINIC3_AF6_MSIX_FLEX_EN_MASK BIT(22) +#define HINIC3_AF6_GET(val, member) \ + FIELD_GET(HINIC3_AF6_##member##_MASK, val) + +#define HINIC3_GET_REG_ADDR(reg) ((reg) & (HINIC3_REGS_FLAG_MASK)) + +static void __iomem *hinic3_reg_addr(struct hinic3_hwif *hwif, u32 reg) +{ + return hwif->cfg_regs_base + HINIC3_GET_REG_ADDR(reg); +} + +u32 hinic3_hwif_read_reg(struct hinic3_hwif *hwif, u32 reg) +{ + void __iomem *addr = hinic3_reg_addr(hwif, reg); + + return ioread32be(addr); +} + +void hinic3_hwif_write_reg(struct hinic3_hwif *hwif, u32 reg, u32 val) +{ + void __iomem *addr = hinic3_reg_addr(hwif, reg); + + iowrite32be(val, addr); +} + +static enum 
hinic3_wait_return check_hwif_ready_handler(void *priv_data) +{ + struct hinic3_hwdev *hwdev = priv_data; + u32 attr1; + + attr1 = hinic3_hwif_read_reg(hwdev->hwif, HINIC3_CSR_FUNC_ATTR1_ADDR); + + return HINIC3_AF1_GET(attr1, MGMT_INIT_STATUS) ? + HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING; +} + +static int wait_hwif_ready(struct hinic3_hwdev *hwdev) +{ + return hinic3_wait_for_timeout(hwdev, check_hwif_ready_handler, + HINIC3_HWIF_READY_TIMEOUT, + USEC_PER_MSEC); +} + +/* Set attr struct from HW attr values. */ +static void set_hwif_attr(struct hinic3_func_attr *attr, u32 attr0, u32 attr1, + u32 attr2, u32 attr3, u32 attr6) +{ + attr->func_global_idx = HINIC3_AF0_GET(attr0, FUNC_GLOBAL_IDX); + attr->port_to_port_idx = HINIC3_AF0_GET(attr0, P2P_IDX); + attr->pci_intf_idx = HINIC3_AF0_GET(attr0, PCI_INTF_IDX); + attr->func_type = HINIC3_AF0_GET(attr0, FUNC_TYPE); + + attr->num_aeqs = BIT(HINIC3_AF1_GET(attr1, AEQS_PER_FUNC)); + attr->num_ceqs = HINIC3_AF2_GET(attr2, CEQS_PER_FUNC); + attr->num_irqs = HINIC3_AF2_GET(attr2, IRQS_PER_FUNC); + if (attr->num_irqs > HINIC3_MAX_MSIX_ENTRY) + attr->num_irqs = HINIC3_MAX_MSIX_ENTRY; + + attr->num_sq = HINIC3_AF6_GET(attr6, FUNC_MAX_SQ); + attr->msix_flex_en = HINIC3_AF6_GET(attr6, MSIX_FLEX_EN); +} + +/* Read attributes from HW and set attribute struct. */ +static int init_hwif_attr(struct hinic3_hwdev *hwdev) +{ + u32 attr0, attr1, attr2, attr3, attr6; + struct hinic3_hwif *hwif; + + hwif = hwdev->hwif; + attr0 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR0_ADDR); + if (attr0 == HINIC3_PCIE_LINK_DOWN) + return -EFAULT; + + attr1 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR1_ADDR); + if (attr1 == HINIC3_PCIE_LINK_DOWN) + return -EFAULT; + + attr2 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR2_ADDR); + if (attr2 == HINIC3_PCIE_LINK_DOWN) + return -EFAULT; + + attr3 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR3_ADDR); + if (attr3 == HINIC3_PCIE_LINK_DOWN) + return -EFAULT; + + attr6 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR6_ADDR); + if (attr6 == HINIC3_PCIE_LINK_DOWN) + return -EFAULT; + + set_hwif_attr(&hwif->attr, attr0, attr1, attr2, attr3, attr6); + + if (!hwif->attr.num_ceqs) { + dev_err(hwdev->dev, "Ceq num cfg in fw is zero\n"); + return -EFAULT; + } + + if (!hwif->attr.num_irqs) { + dev_err(hwdev->dev, + "Irq num cfg in fw is zero, msix_flex_en %d\n", + hwif->attr.msix_flex_en); + return -EFAULT; + } + + return 0; +} + +static enum hinic3_doorbell_ctrl hinic3_get_doorbell_ctrl_status(struct hinic3_hwif *hwif) +{ + u32 attr4 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR4_ADDR); + + return HINIC3_AF4_GET(attr4, DOORBELL_CTRL); +} + +static enum hinic3_outbound_ctrl hinic3_get_outbound_ctrl_status(struct hinic3_hwif *hwif) +{ + u32 attr5 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR5_ADDR); + + return HINIC3_AF5_GET(attr5, OUTBOUND_CTRL); +} + +void hinic3_toggle_doorbell(struct hinic3_hwif *hwif, + enum hinic3_doorbell_ctrl flag) +{ + u32 addr, attr4; + + addr = HINIC3_CSR_FUNC_ATTR4_ADDR; + attr4 = hinic3_hwif_read_reg(hwif, addr); + + attr4 &= ~HINIC3_AF4_DOORBELL_CTRL_MASK; + attr4 |= HINIC3_AF4_SET(flag, DOORBELL_CTRL); + + hinic3_hwif_write_reg(hwif, addr, attr4); +} + +static int db_area_idx_init(struct hinic3_hwif *hwif, u64 db_base_phy, + u8 __iomem *db_base, u64 db_dwqe_len) +{ + struct hinic3_db_area *db_area = &hwif->db_area; + u32 db_max_areas; + + hwif->db_base_phy = db_base_phy; + hwif->db_base = db_base; + hwif->db_dwqe_len = db_dwqe_len; + + db_max_areas = db_dwqe_len > 
HINIC3_DB_DWQE_SIZE ? + HINIC3_DB_MAX_AREAS : db_dwqe_len / HINIC3_DB_PAGE_SIZE; + db_area->db_bitmap_array = bitmap_zalloc(db_max_areas, GFP_KERNEL); + if (!db_area->db_bitmap_array) + return -ENOMEM; + + db_area->db_max_areas = db_max_areas; + spin_lock_init(&db_area->idx_lock); + + return 0; +} + +static void db_area_idx_free(struct hinic3_db_area *db_area) +{ + bitmap_free(db_area->db_bitmap_array); +} + +static int get_db_idx(struct hinic3_hwif *hwif, u32 *idx) +{ + struct hinic3_db_area *db_area = &hwif->db_area; + u32 pg_idx; + + spin_lock(&db_area->idx_lock); + pg_idx = find_first_zero_bit(db_area->db_bitmap_array, + db_area->db_max_areas); + if (pg_idx == db_area->db_max_areas) { + spin_unlock(&db_area->idx_lock); + return -ENOMEM; + } + set_bit(pg_idx, db_area->db_bitmap_array); + spin_unlock(&db_area->idx_lock); + + *idx = pg_idx; + + return 0; +} + +static void free_db_idx(struct hinic3_hwif *hwif, u32 idx) +{ + struct hinic3_db_area *db_area = &hwif->db_area; + + spin_lock(&db_area->idx_lock); + clear_bit(idx, db_area->db_bitmap_array); + spin_unlock(&db_area->idx_lock); +} + +void hinic3_free_db_addr(struct hinic3_hwdev *hwdev, const u8 __iomem *db_base) +{ + struct hinic3_hwif *hwif; + uintptr_t distance; + u32 idx; + + hwif = hwdev->hwif; + distance = db_base - hwif->db_base; + idx = distance / HINIC3_DB_PAGE_SIZE; + + free_db_idx(hwif, idx); +} + +int hinic3_alloc_db_addr(struct hinic3_hwdev *hwdev, void __iomem **db_base, + void __iomem **dwqe_base) +{ + struct hinic3_hwif *hwif; + u8 __iomem *addr; + u32 idx; + int err; + + hwif = hwdev->hwif; + + err = get_db_idx(hwif, &idx); + if (err) + return err; + + addr = hwif->db_base + idx * HINIC3_DB_PAGE_SIZE; + *db_base = addr; + + if (dwqe_base) + *dwqe_base = addr + HINIC3_DWQE_OFFSET; + + return 0; +} + void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx, enum hinic3_msix_state flag) { - /* Completed by later submission due to LoC limit. 
*/ + struct hinic3_hwif *hwif; + u8 int_msk = 1; + u32 mask_bits; + u32 addr; + + hwif = hwdev->hwif; + + if (flag) + mask_bits = HINIC3_MSI_CLR_INDIR_SET(int_msk, INT_MSK_SET); + else + mask_bits = HINIC3_MSI_CLR_INDIR_SET(int_msk, INT_MSK_CLR); + mask_bits = mask_bits | + HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX); + + addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR; + hinic3_hwif_write_reg(hwif, addr, mask_bits); +} + +static void disable_all_msix(struct hinic3_hwdev *hwdev) +{ + u16 num_irqs = hwdev->hwif->attr.num_irqs; + u16 i; + + for (i = 0; i < num_irqs; i++) + hinic3_set_msix_state(hwdev, i, HINIC3_MSIX_DISABLE); +} + +void hinic3_msix_intr_clear_resend_bit(struct hinic3_hwdev *hwdev, u16 msix_idx, + u8 clear_resend_en) +{ + struct hinic3_hwif *hwif; + u32 msix_ctrl, addr; + + hwif = hwdev->hwif; + + msix_ctrl = HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX) | + HINIC3_MSI_CLR_INDIR_SET(clear_resend_en, RESEND_TIMER_CLR); + + addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR; + hinic3_hwif_write_reg(hwif, addr, msix_ctrl); +} + +void hinic3_set_msix_auto_mask_state(struct hinic3_hwdev *hwdev, u16 msix_idx, + enum hinic3_msix_auto_mask flag) +{ + struct hinic3_hwif *hwif; + u32 mask_bits; + u32 addr; + + hwif = hwdev->hwif; + + if (flag) + mask_bits = HINIC3_MSI_CLR_INDIR_SET(1, AUTO_MSK_SET); + else + mask_bits = HINIC3_MSI_CLR_INDIR_SET(1, AUTO_MSK_CLR); + + mask_bits = mask_bits | + HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX); + + addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR; + hinic3_hwif_write_reg(hwif, addr, mask_bits); +} + +static enum hinic3_wait_return check_db_outbound_enable_handler(void *priv_data) +{ + enum hinic3_outbound_ctrl outbound_ctrl; + struct hinic3_hwif *hwif = priv_data; + enum hinic3_doorbell_ctrl db_ctrl; + + db_ctrl = hinic3_get_doorbell_ctrl_status(hwif); + outbound_ctrl = hinic3_get_outbound_ctrl_status(hwif); + if (outbound_ctrl == ENABLE_OUTBOUND && db_ctrl == ENABLE_DOORBELL) + return HINIC3_WAIT_PROCESS_CPL; + + return HINIC3_WAIT_PROCESS_WAITING; +} + +static int wait_until_doorbell_and_outbound_enabled(struct hinic3_hwif *hwif) +{ + return hinic3_wait_for_timeout(hwif, check_db_outbound_enable_handler, + HINIC3_DB_AND_OUTBOUND_EN_TIMEOUT, + USEC_PER_MSEC); +} + +int hinic3_init_hwif(struct hinic3_hwdev *hwdev) +{ + struct hinic3_pcidev *pci_adapter = hwdev->adapter; + struct hinic3_hwif *hwif; + u32 attr1, attr4, attr5; + int err; + + hwif = kzalloc(sizeof(*hwif), GFP_KERNEL); + if (!hwif) + return -ENOMEM; + + hwdev->hwif = hwif; + hwif->cfg_regs_base = (u8 __iomem *)pci_adapter->cfg_reg_base + + HINIC3_VF_CFG_REG_OFFSET; + + err = db_area_idx_init(hwif, pci_adapter->db_base_phy, + pci_adapter->db_base, + pci_adapter->db_dwqe_len); + if (err) { + dev_err(hwdev->dev, "Failed to init db area.\n"); + goto err_free_hwif; + } + + err = wait_hwif_ready(hwdev); + if (err) { + attr1 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR1_ADDR); + dev_err(hwdev->dev, "Chip status is not ready, attr1:0x%x\n", + attr1); + goto err_free_db_area_idx; + } + + err = init_hwif_attr(hwdev); + if (err) { + dev_err(hwdev->dev, "Init hwif attr failed\n"); + goto err_free_db_area_idx; + } + + err = wait_until_doorbell_and_outbound_enabled(hwif); + if (err) { + attr4 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR4_ADDR); + attr5 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR5_ADDR); + dev_err(hwdev->dev, "HW doorbell/outbound is disabled, attr4 0x%x attr5 0x%x\n", + attr4, attr5); + goto err_free_db_area_idx; + } + + disable_all_msix(hwdev); + + return 0; + 
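/*
 * [Editor's note -- illustrative sketch, not part of the patch] The MSI-X
 * helpers above (hinic3_set_msix_state, hinic3_msix_intr_clear_resend_bit,
 * hinic3_set_msix_auto_mask_state) all use one indirect-access pattern:
 * compose a 32-bit control word with FIELD_PREP()-based helpers, then issue
 * a single write to HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR. A minimal sketch of
 * that composition, assuming hypothetical DEMO_* masks rather than the real
 * layout in hinic3_csr.h:
 *
 *	#include <linux/bitfield.h>
 *
 *	#define DEMO_MSI_IDX_MASK	GENMASK(9, 0)
 *	#define DEMO_MSI_SET_MASK	BIT(22)
 *	#define DEMO_MSI_CLR_MASK	BIT(23)
 *
 *	static u32 demo_msi_ctrl_word(u16 msix_idx, bool mask)
 *	{
 *		u32 val = FIELD_PREP(DEMO_MSI_IDX_MASK, msix_idx);
 *
 *		if (mask)
 *			val |= FIELD_PREP(DEMO_MSI_SET_MASK, 1);
 *		else
 *			val |= FIELD_PREP(DEMO_MSI_CLR_MASK, 1);
 *		return val;
 *	}
 *
 * One register write thus carries both the entry index and the requested
 * operation, which is why no read-modify-write of the CSR is needed here.
 */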
+err_free_db_area_idx: + db_area_idx_free(&hwif->db_area); +err_free_hwif: + kfree(hwif); + + return err; +} + +void hinic3_free_hwif(struct hinic3_hwdev *hwdev) +{ + db_area_idx_free(&hwdev->hwif->db_area); + kfree(hwdev->hwif); } u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h index 513c9680e6b6..c02904e861cc 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h @@ -45,13 +45,45 @@ struct hinic3_hwif { struct hinic3_func_attr attr; }; +enum hinic3_outbound_ctrl { + ENABLE_OUTBOUND = 0x0, + DISABLE_OUTBOUND = 0x1, +}; + +enum hinic3_doorbell_ctrl { + ENABLE_DOORBELL = 0, + DISABLE_DOORBELL = 1, +}; + enum hinic3_msix_state { HINIC3_MSIX_ENABLE, HINIC3_MSIX_DISABLE, }; +enum hinic3_msix_auto_mask { + HINIC3_CLR_MSIX_AUTO_MASK, + HINIC3_SET_MSIX_AUTO_MASK, +}; + +u32 hinic3_hwif_read_reg(struct hinic3_hwif *hwif, u32 reg); +void hinic3_hwif_write_reg(struct hinic3_hwif *hwif, u32 reg, u32 val); + +void hinic3_toggle_doorbell(struct hinic3_hwif *hwif, + enum hinic3_doorbell_ctrl flag); + +int hinic3_alloc_db_addr(struct hinic3_hwdev *hwdev, void __iomem **db_base, + void __iomem **dwqe_base); +void hinic3_free_db_addr(struct hinic3_hwdev *hwdev, const u8 __iomem *db_base); + +int hinic3_init_hwif(struct hinic3_hwdev *hwdev); +void hinic3_free_hwif(struct hinic3_hwdev *hwdev); + void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx, enum hinic3_msix_state flag); +void hinic3_msix_intr_clear_resend_bit(struct hinic3_hwdev *hwdev, u16 msix_idx, + u8 clear_resend_en); +void hinic3_set_msix_auto_mask_state(struct hinic3_hwdev *hwdev, u16 msix_idx, + enum hinic3_msix_auto_mask flag); u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev); diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c index 8b92eed25edf..a69b361225e9 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c @@ -38,19 +38,19 @@ static int hinic3_poll(struct napi_struct *napi, int budget) return work_done; } -void qp_add_napi(struct hinic3_irq_cfg *irq_cfg) +static void qp_add_napi(struct hinic3_irq_cfg *irq_cfg) { struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); + netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll); netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id, NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi); netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id, NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi); - netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll); napi_enable(&irq_cfg->napi); } -void qp_del_napi(struct hinic3_irq_cfg *irq_cfg) +static void qp_del_napi(struct hinic3_irq_cfg *irq_cfg) { napi_disable(&irq_cfg->napi); netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id, @@ -60,3 +60,135 @@ void qp_del_napi(struct hinic3_irq_cfg *irq_cfg) netif_stop_subqueue(irq_cfg->netdev, irq_cfg->irq_id); netif_napi_del(&irq_cfg->napi); } + +static irqreturn_t qp_irq(int irq, void *data) +{ + struct hinic3_irq_cfg *irq_cfg = data; + struct hinic3_nic_dev *nic_dev; + + nic_dev = netdev_priv(irq_cfg->netdev); + hinic3_msix_intr_clear_resend_bit(nic_dev->hwdev, + irq_cfg->msix_entry_idx, 1); + + napi_schedule(&irq_cfg->napi); + + return IRQ_HANDLED; +} + +static int hinic3_request_irq(struct hinic3_irq_cfg *irq_cfg, u16 q_id) +{ + struct hinic3_interrupt_info info = {}; + struct hinic3_nic_dev 
*nic_dev; + struct net_device *netdev; + int err; + + netdev = irq_cfg->netdev; + nic_dev = netdev_priv(netdev); + qp_add_napi(irq_cfg); + + info.msix_index = irq_cfg->msix_entry_idx; + info.interrupt_coalesc_set = 1; + info.pending_limit = nic_dev->intr_coalesce[q_id].pending_limit; + info.coalesc_timer_cfg = + nic_dev->intr_coalesce[q_id].coalesce_timer_cfg; + info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg; + err = hinic3_set_interrupt_cfg_direct(nic_dev->hwdev, &info); + if (err) { + netdev_err(netdev, "Failed to set RX interrupt coalescing attribute.\n"); + qp_del_napi(irq_cfg); + return err; + } + + err = request_irq(irq_cfg->irq_id, qp_irq, 0, irq_cfg->irq_name, + irq_cfg); + if (err) { + qp_del_napi(irq_cfg); + return err; + } + + irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask); + + return 0; +} + +static void hinic3_release_irq(struct hinic3_irq_cfg *irq_cfg) +{ + irq_set_affinity_hint(irq_cfg->irq_id, NULL); + free_irq(irq_cfg->irq_id, irq_cfg); +} + +int hinic3_qps_irq_init(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + struct hinic3_irq_cfg *irq_cfg; + struct msix_entry *msix_entry; + u32 local_cpu; + u16 q_id; + int err; + + for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) { + msix_entry = &nic_dev->qps_msix_entries[q_id]; + irq_cfg = &nic_dev->q_params.irq_cfg[q_id]; + + irq_cfg->irq_id = msix_entry->vector; + irq_cfg->msix_entry_idx = msix_entry->entry; + irq_cfg->netdev = netdev; + irq_cfg->txq = &nic_dev->txqs[q_id]; + irq_cfg->rxq = &nic_dev->rxqs[q_id]; + nic_dev->rxqs[q_id].irq_cfg = irq_cfg; + + local_cpu = cpumask_local_spread(q_id, dev_to_node(&pdev->dev)); + cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask); + + snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name), + "%s_qp%u", netdev->name, q_id); + + err = hinic3_request_irq(irq_cfg, q_id); + if (err) { + netdev_err(netdev, "Failed to request Rx irq\n"); + goto err_release_irqs; + } + + hinic3_set_msix_auto_mask_state(nic_dev->hwdev, + irq_cfg->msix_entry_idx, + HINIC3_SET_MSIX_AUTO_MASK); + hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, + HINIC3_MSIX_ENABLE); + } + + return 0; + +err_release_irqs: + while (q_id > 0) { + q_id--; + irq_cfg = &nic_dev->q_params.irq_cfg[q_id]; + qp_del_napi(irq_cfg); + hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, + HINIC3_MSIX_DISABLE); + hinic3_set_msix_auto_mask_state(nic_dev->hwdev, + irq_cfg->msix_entry_idx, + HINIC3_CLR_MSIX_AUTO_MASK); + hinic3_release_irq(irq_cfg); + } + + return err; +} + +void hinic3_qps_irq_uninit(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_irq_cfg *irq_cfg; + u16 q_id; + + for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) { + irq_cfg = &nic_dev->q_params.irq_cfg[q_id]; + qp_del_napi(irq_cfg); + hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, + HINIC3_MSIX_DISABLE); + hinic3_set_msix_auto_mask_state(nic_dev->hwdev, + irq_cfg->msix_entry_idx, + HINIC3_CLR_MSIX_AUTO_MASK); + hinic3_release_irq(irq_cfg); + } +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c index 4827326e6a59..3db8241a3b0c 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c @@ -8,6 +8,7 @@ #include "hinic3_hwdev.h" #include "hinic3_lld.h" #include "hinic3_mgmt.h" +#include "hinic3_pci_id_tbl.h" #define HINIC3_VF_PCI_CFG_REG_BAR 0 
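/*
 * [Editor's note -- illustrative sketch, not part of the patch] The per-queue
 * IRQ bring-up in hinic3_irq.c above follows a fixed order: add NAPI, push
 * the coalescing profile to HW, request_irq(), then hint affinity toward the
 * device's NUMA node with one CPU per queue. A reduced version of that
 * ordering (demo_* names are hypothetical; assumes <linux/interrupt.h> and
 * <linux/netdevice.h>):
 */
static irqreturn_t demo_qp_irq(int irq, void *data)
{
	/* The real handler clears the HW resend bit, then napi_schedule()s. */
	return IRQ_HANDLED;
}

static int demo_qp_irq_setup(struct net_device *netdev, int irq, u16 q_id,
			     struct cpumask *affinity_mask)
{
	u32 cpu;
	int err;

	/* Spread queues across CPUs local to the device's NUMA node. */
	cpu = cpumask_local_spread(q_id, dev_to_node(netdev->dev.parent));
	cpumask_set_cpu(cpu, affinity_mask);

	err = request_irq(irq, demo_qp_irq, 0, netdev->name, netdev);
	if (err)
		return err;

	irq_set_affinity_hint(irq, affinity_mask);

	return 0;
}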
#define HINIC3_PCI_INTR_REG_BAR 2 @@ -121,6 +122,7 @@ static int hinic3_attach_aux_devices(struct hinic3_hwdev *hwdev) goto err_del_adevs; } mutex_unlock(&pci_adapter->pdev_mutex); + return 0; err_del_adevs: @@ -132,6 +134,7 @@ err_del_adevs: } } mutex_unlock(&pci_adapter->pdev_mutex); + return -ENOMEM; } @@ -153,6 +156,7 @@ struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev) struct hinic3_adev *hadev; hadev = container_of(adev, struct hinic3_adev, adev); + return hadev->hwdev; } @@ -307,6 +311,7 @@ static void hinic3_func_uninit(struct pci_dev *pdev) { struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + hinic3_flush_mgmt_workq(pci_adapter->hwdev); hinic3_detach_aux_devices(pci_adapter->hwdev); hinic3_free_hwdev(pci_adapter->hwdev); } @@ -333,6 +338,7 @@ err_unmap_bar: err_out: dev_err(&pdev->dev, "PCIe device probe function failed\n"); + return err; } @@ -365,6 +371,7 @@ err_uninit_pci: err_out: dev_err(&pdev->dev, "PCIe device probe failed\n"); + return err; } @@ -377,7 +384,7 @@ static void hinic3_remove(struct pci_dev *pdev) } static const struct pci_device_id hinic3_pci_table[] = { - /* Completed by later submission due to LoC limit. */ + {PCI_VDEVICE(HUAWEI, PCI_DEV_ID_HINIC3_VF), 0}, {0, 0} }; diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c index 497f2a36f35d..6d87d4d895ba 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c @@ -12,17 +12,59 @@ #include "hinic3_nic_cfg.h" #include "hinic3_nic_dev.h" #include "hinic3_nic_io.h" +#include "hinic3_rss.h" #include "hinic3_rx.h" #include "hinic3_tx.h" #define HINIC3_NIC_DRV_DESC "Intelligent Network Interface Card Driver" -#define HINIC3_RX_BUF_LEN 2048 -#define HINIC3_LRO_REPLENISH_THLD 256 -#define HINIC3_NIC_DEV_WQ_NAME "hinic3_nic_dev_wq" +#define HINIC3_RX_BUF_LEN 2048 +#define HINIC3_LRO_REPLENISH_THLD 256 +#define HINIC3_NIC_DEV_WQ_NAME "hinic3_nic_dev_wq" -#define HINIC3_SQ_DEPTH 1024 -#define HINIC3_RQ_DEPTH 1024 +#define HINIC3_SQ_DEPTH 1024 +#define HINIC3_RQ_DEPTH 1024 + +#define HINIC3_DEFAULT_TXRX_MSIX_PENDING_LIMIT 2 +#define HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG 25 +#define HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG 7 + +static void init_intr_coal_param(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_intr_coal_info *info; + u16 i; + + for (i = 0; i < nic_dev->max_qps; i++) { + info = &nic_dev->intr_coalesce[i]; + info->pending_limit = HINIC3_DEFAULT_TXRX_MSIX_PENDING_LIMIT; + info->coalesce_timer_cfg = HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG; + info->resend_timer_cfg = HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG; + } +} + +static int hinic3_init_intr_coalesce(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + nic_dev->intr_coalesce = kcalloc(nic_dev->max_qps, + sizeof(*nic_dev->intr_coalesce), + GFP_KERNEL); + + if (!nic_dev->intr_coalesce) + return -ENOMEM; + + init_intr_coal_param(netdev); + + return 0; +} + +static void hinic3_free_intr_coalesce(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + kfree(nic_dev->intr_coalesce); +} static int hinic3_alloc_txrxqs(struct net_device *netdev) { @@ -42,8 +84,17 @@ static int hinic3_alloc_txrxqs(struct net_device *netdev) goto err_free_txqs; } + err = hinic3_init_intr_coalesce(netdev); + if (err) { + dev_err(hwdev->dev, "Failed to init_intr_coalesce\n"); + goto err_free_rxqs; + } + 
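/*
 * [Editor's note -- illustrative sketch, not part of the patch] The new
 * err_free_rxqs label below exists because hinic3_init_intr_coalesce() is
 * now a third acquisition step, and the kernel's unwind idiom releases
 * resources in exact reverse order of acquisition. Reduced to a skeleton
 * with hypothetical step names:
 *
 *	err = alloc_txqs();
 *	if (err)
 *		return err;
 *	err = alloc_rxqs();
 *	if (err)
 *		goto err_free_txqs;
 *	err = init_intr_coalesce();
 *	if (err)
 *		goto err_free_rxqs;
 *	return 0;
 *
 *	err_free_rxqs:
 *		free_rxqs();
 *	err_free_txqs:
 *		free_txqs();
 *	return err;
 *
 * Each label frees exactly what was acquired before the failing step, so a
 * failure at any point leaks nothing, and the normal teardown path
 * (hinic3_free_txrxqs below) mirrors the same reverse order.
 */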
return 0; +err_free_rxqs: + hinic3_free_rxqs(netdev); + err_free_txqs: hinic3_free_txqs(netdev); @@ -52,6 +103,7 @@ err_free_txqs: static void hinic3_free_txrxqs(struct net_device *netdev) { + hinic3_free_intr_coalesce(netdev); hinic3_free_rxqs(netdev); hinic3_free_txqs(netdev); } @@ -83,6 +135,8 @@ static int hinic3_sw_init(struct net_device *netdev) nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH; nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH; + hinic3_try_to_enable_rss(netdev); + /* VF driver always uses random MAC address. During VM migration to a * new device, the new device should learn the VMs old MAC rather than * provide its own MAC. The product design assumes that every VF is @@ -94,7 +148,7 @@ static int hinic3_sw_init(struct net_device *netdev) hinic3_global_func_id(hwdev)); if (err) { dev_err(hwdev->dev, "Failed to set default MAC\n"); - return err; + goto err_clear_rss_config; } err = hinic3_alloc_txrxqs(netdev); @@ -108,6 +162,8 @@ static int hinic3_sw_init(struct net_device *netdev) err_del_mac: hinic3_del_mac(hwdev, netdev->dev_addr, 0, hinic3_global_func_id(hwdev)); +err_clear_rss_config: + hinic3_clear_rss_config(netdev); return err; } @@ -119,6 +175,7 @@ static void hinic3_sw_uninit(struct net_device *netdev) hinic3_free_txrxqs(netdev); hinic3_del_mac(nic_dev->hwdev, netdev->dev_addr, 0, hinic3_global_func_id(nic_dev->hwdev)); + hinic3_clear_rss_config(netdev); } static void hinic3_assign_netdev_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c index e74d1eb09730..cf67e26acece 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c @@ -4,13 +4,857 @@ #include <linux/dma-mapping.h> #include "hinic3_common.h" +#include "hinic3_csr.h" #include "hinic3_hwdev.h" #include "hinic3_hwif.h" #include "hinic3_mbox.h" +#define MBOX_INT_DST_AEQN_MASK GENMASK(11, 10) +#define MBOX_INT_SRC_RESP_AEQN_MASK GENMASK(13, 12) +#define MBOX_INT_STAT_DMA_MASK GENMASK(19, 14) +/* TX size, expressed in 4 bytes units */ +#define MBOX_INT_TX_SIZE_MASK GENMASK(24, 20) +/* SO_RO == strong order, relaxed order */ +#define MBOX_INT_STAT_DMA_SO_RO_MASK GENMASK(26, 25) +#define MBOX_INT_WB_EN_MASK BIT(28) +#define MBOX_INT_SET(val, field) \ + FIELD_PREP(MBOX_INT_##field##_MASK, val) + +#define MBOX_CTRL_TRIGGER_AEQE_MASK BIT(0) +#define MBOX_CTRL_TX_STATUS_MASK BIT(1) +#define MBOX_CTRL_DST_FUNC_MASK GENMASK(28, 16) +#define MBOX_CTRL_SET(val, field) \ + FIELD_PREP(MBOX_CTRL_##field##_MASK, val) + +#define MBOX_MSG_POLLING_TIMEOUT_MS 8000 // send msg seg timeout +#define MBOX_COMP_POLLING_TIMEOUT_MS 40000 // response + +#define MBOX_MAX_BUF_SZ 2048 +#define MBOX_HEADER_SZ 8 + +/* MBOX size is 64B, 8B for mbox_header, 8B reserved */ +#define MBOX_SEG_LEN 48 +#define MBOX_SEG_LEN_ALIGN 4 +#define MBOX_WB_STATUS_LEN 16 + +#define MBOX_SEQ_ID_START_VAL 0 +#define MBOX_SEQ_ID_MAX_VAL 42 +#define MBOX_LAST_SEG_MAX_LEN \ + (MBOX_MAX_BUF_SZ - MBOX_SEQ_ID_MAX_VAL * MBOX_SEG_LEN) + +/* mbox write back status is 16B, only first 4B is used */ +#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF +#define MBOX_WB_STATUS_MASK 0xFF +#define MBOX_WB_ERROR_CODE_MASK 0xFF00 +#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF +#define MBOX_WB_STATUS_NOT_FINISHED 0x00 + +#define MBOX_STATUS_FINISHED(wb) \ + ((FIELD_PREP(MBOX_WB_STATUS_MASK, (wb))) != MBOX_WB_STATUS_NOT_FINISHED) +#define MBOX_STATUS_SUCCESS(wb) \ + ((FIELD_PREP(MBOX_WB_STATUS_MASK, (wb))) == \ + 
MBOX_WB_STATUS_FINISHED_SUCCESS) +#define MBOX_STATUS_ERRCODE(wb) \ + ((wb) & MBOX_WB_ERROR_CODE_MASK) + +#define MBOX_DMA_MSG_QUEUE_DEPTH 32 +#define MBOX_AREA(hwif) \ + ((hwif)->cfg_regs_base + HINIC3_FUNC_CSR_MAILBOX_DATA_OFF) + +#define MBOX_MQ_CI_OFFSET \ + (HINIC3_CFG_REGS_FLAG + HINIC3_FUNC_CSR_MAILBOX_DATA_OFF + \ + MBOX_HEADER_SZ + MBOX_SEG_LEN) + +#define MBOX_MQ_SYNC_CI_MASK GENMASK(7, 0) +#define MBOX_MQ_ASYNC_CI_MASK GENMASK(15, 8) +#define MBOX_MQ_CI_GET(val, field) \ + FIELD_GET(MBOX_MQ_##field##_CI_MASK, val) + +#define MBOX_MGMT_FUNC_ID 0x1FFF +#define MBOX_COMM_F_MBOX_SEGMENT BIT(3) + +static u8 *get_mobx_body_from_hdr(u8 *header) +{ + return header + MBOX_HEADER_SZ; +} + +static struct hinic3_msg_desc *get_mbox_msg_desc(struct hinic3_mbox *mbox, + enum mbox_msg_direction_type dir, + u16 src_func_id) +{ + struct hinic3_msg_channel *msg_ch; + + msg_ch = (src_func_id == MBOX_MGMT_FUNC_ID) ? + &mbox->mgmt_msg : mbox->func_msg; + + return (dir == MBOX_MSG_SEND) ? + &msg_ch->recv_msg : &msg_ch->resp_msg; +} + +static void resp_mbox_handler(struct hinic3_mbox *mbox, + const struct hinic3_msg_desc *msg_desc) +{ + spin_lock(&mbox->mbox_lock); + if (msg_desc->msg_info.msg_id == mbox->send_msg_id && + mbox->event_flag == MBOX_EVENT_START) + mbox->event_flag = MBOX_EVENT_SUCCESS; + spin_unlock(&mbox->mbox_lock); +} + +static bool mbox_segment_valid(struct hinic3_mbox *mbox, + struct hinic3_msg_desc *msg_desc, + __le64 mbox_header) +{ + u8 seq_id, seg_len, msg_id, mod; + __le16 src_func_idx, cmd; + + seq_id = MBOX_MSG_HEADER_GET(mbox_header, SEQID); + seg_len = MBOX_MSG_HEADER_GET(mbox_header, SEG_LEN); + msg_id = MBOX_MSG_HEADER_GET(mbox_header, MSG_ID); + mod = MBOX_MSG_HEADER_GET(mbox_header, MODULE); + cmd = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header, CMD)); + src_func_idx = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header, + SRC_GLB_FUNC_IDX)); + + if (seq_id > MBOX_SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN || + (seq_id == MBOX_SEQ_ID_MAX_VAL && seg_len > MBOX_LAST_SEG_MAX_LEN)) + goto err_seg; + + if (seq_id == 0) { + msg_desc->seq_id = seq_id; + msg_desc->msg_info.msg_id = msg_id; + msg_desc->mod = mod; + msg_desc->cmd = cmd; + } else { + if (seq_id != msg_desc->seq_id + 1 || + msg_id != msg_desc->msg_info.msg_id || + mod != msg_desc->mod || cmd != msg_desc->cmd) + goto err_seg; + + msg_desc->seq_id = seq_id; + } + + return true; + +err_seg: + dev_err(mbox->hwdev->dev, + "Mailbox segment check failed, src func id: 0x%x, front seg info: seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n", + src_func_idx, msg_desc->seq_id, msg_desc->msg_info.msg_id, + msg_desc->mod, msg_desc->cmd); + dev_err(mbox->hwdev->dev, + "Current seg info: seg len: 0x%x, seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n", + seg_len, seq_id, msg_id, mod, cmd); + + return false; +} + +static void recv_mbox_handler(struct hinic3_mbox *mbox, + u8 *header, struct hinic3_msg_desc *msg_desc) +{ + __le64 mbox_header = *((__force __le64 *)header); + u8 *mbox_body = get_mobx_body_from_hdr(header); + u8 seq_id, seg_len; + int pos; + + if (!mbox_segment_valid(mbox, msg_desc, mbox_header)) { + msg_desc->seq_id = MBOX_SEQ_ID_MAX_VAL; + return; + } + + seq_id = MBOX_MSG_HEADER_GET(mbox_header, SEQID); + seg_len = MBOX_MSG_HEADER_GET(mbox_header, SEG_LEN); + + pos = seq_id * MBOX_SEG_LEN; + memcpy(msg_desc->msg + pos, mbox_body, seg_len); + + if (!MBOX_MSG_HEADER_GET(mbox_header, LAST)) + return; + + msg_desc->msg_len = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header, + MSG_LEN)); + msg_desc->msg_info.status = 
MBOX_MSG_HEADER_GET(mbox_header, STATUS); + + if (MBOX_MSG_HEADER_GET(mbox_header, DIRECTION) == MBOX_MSG_RESP) + resp_mbox_handler(mbox, msg_desc); +} + +void hinic3_mbox_func_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header, + u8 size) +{ + __le64 mbox_header = *((__force __le64 *)header); + enum mbox_msg_direction_type dir; + struct hinic3_msg_desc *msg_desc; + struct hinic3_mbox *mbox; + u16 src_func_id; + + mbox = hwdev->mbox; + dir = MBOX_MSG_HEADER_GET(mbox_header, DIRECTION); + src_func_id = MBOX_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + msg_desc = get_mbox_msg_desc(mbox, dir, src_func_id); + recv_mbox_handler(mbox, header, msg_desc); +} + +static int init_mbox_dma_queue(struct hinic3_hwdev *hwdev, + struct mbox_dma_queue *mq) +{ + u32 size; + + mq->depth = MBOX_DMA_MSG_QUEUE_DEPTH; + mq->prod_idx = 0; + mq->cons_idx = 0; + + size = mq->depth * MBOX_MAX_BUF_SZ; + mq->dma_buf_vaddr = dma_alloc_coherent(hwdev->dev, size, + &mq->dma_buf_paddr, + GFP_KERNEL); + if (!mq->dma_buf_vaddr) + return -ENOMEM; + + return 0; +} + +static void uninit_mbox_dma_queue(struct hinic3_hwdev *hwdev, + struct mbox_dma_queue *mq) +{ + dma_free_coherent(hwdev->dev, mq->depth * MBOX_MAX_BUF_SZ, + mq->dma_buf_vaddr, mq->dma_buf_paddr); +} + +static int hinic3_init_mbox_dma_queue(struct hinic3_mbox *mbox) +{ + u32 val; + int err; + + err = init_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue); + if (err) + return err; + + err = init_mbox_dma_queue(mbox->hwdev, &mbox->async_msg_queue); + if (err) { + uninit_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue); + return err; + } + + val = hinic3_hwif_read_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET); + val &= ~MBOX_MQ_SYNC_CI_MASK; + val &= ~MBOX_MQ_ASYNC_CI_MASK; + hinic3_hwif_write_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET, val); + + return 0; +} + +static void hinic3_uninit_mbox_dma_queue(struct hinic3_mbox *mbox) +{ + uninit_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue); + uninit_mbox_dma_queue(mbox->hwdev, &mbox->async_msg_queue); +} + +static int alloc_mbox_msg_channel(struct hinic3_msg_channel *msg_ch) +{ + msg_ch->resp_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!msg_ch->resp_msg.msg) + return -ENOMEM; + + msg_ch->recv_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!msg_ch->recv_msg.msg) { + kfree(msg_ch->resp_msg.msg); + return -ENOMEM; + } + + msg_ch->resp_msg.seq_id = MBOX_SEQ_ID_MAX_VAL; + msg_ch->recv_msg.seq_id = MBOX_SEQ_ID_MAX_VAL; + + return 0; +} + +static void free_mbox_msg_channel(struct hinic3_msg_channel *msg_ch) +{ + kfree(msg_ch->recv_msg.msg); + kfree(msg_ch->resp_msg.msg); +} + +static int init_mgmt_msg_channel(struct hinic3_mbox *mbox) +{ + int err; + + err = alloc_mbox_msg_channel(&mbox->mgmt_msg); + if (err) { + dev_err(mbox->hwdev->dev, "Failed to alloc mgmt message channel\n"); + return err; + } + + err = hinic3_init_mbox_dma_queue(mbox); + if (err) { + dev_err(mbox->hwdev->dev, "Failed to init mbox dma queue\n"); + free_mbox_msg_channel(&mbox->mgmt_msg); + return err; + } + + return 0; +} + +static void uninit_mgmt_msg_channel(struct hinic3_mbox *mbox) +{ + hinic3_uninit_mbox_dma_queue(mbox); + free_mbox_msg_channel(&mbox->mgmt_msg); +} + +static int hinic3_init_func_mbox_msg_channel(struct hinic3_hwdev *hwdev) +{ + struct hinic3_mbox *mbox; + int err; + + mbox = hwdev->mbox; + mbox->func_msg = kzalloc(sizeof(*mbox->func_msg), GFP_KERNEL); + if (!mbox->func_msg) + return -ENOMEM; + + err = alloc_mbox_msg_channel(mbox->func_msg); + if (err) + goto err_free_func_msg; + + return 0; + +err_free_func_msg: 
+ kfree(mbox->func_msg); + mbox->func_msg = NULL; + + return err; +} + +static void hinic3_uninit_func_mbox_msg_channel(struct hinic3_hwdev *hwdev) +{ + struct hinic3_mbox *mbox = hwdev->mbox; + + free_mbox_msg_channel(mbox->func_msg); + kfree(mbox->func_msg); + mbox->func_msg = NULL; +} + +static void prepare_send_mbox(struct hinic3_mbox *mbox) +{ + struct hinic3_send_mbox *send_mbox = &mbox->send_mbox; + + send_mbox->data = MBOX_AREA(mbox->hwdev->hwif); +} + +static int alloc_mbox_wb_status(struct hinic3_mbox *mbox) +{ + struct hinic3_send_mbox *send_mbox = &mbox->send_mbox; + struct hinic3_hwdev *hwdev = mbox->hwdev; + u32 addr_h, addr_l; + + send_mbox->wb_vaddr = dma_alloc_coherent(hwdev->dev, + MBOX_WB_STATUS_LEN, + &send_mbox->wb_paddr, + GFP_KERNEL); + if (!send_mbox->wb_vaddr) + return -ENOMEM; + + addr_h = upper_32_bits(send_mbox->wb_paddr); + addr_l = lower_32_bits(send_mbox->wb_paddr); + hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF, + addr_h); + hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF, + addr_l); + + return 0; +} + +static void free_mbox_wb_status(struct hinic3_mbox *mbox) +{ + struct hinic3_send_mbox *send_mbox = &mbox->send_mbox; + struct hinic3_hwdev *hwdev = mbox->hwdev; + + hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF, + 0); + hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF, + 0); + + dma_free_coherent(hwdev->dev, MBOX_WB_STATUS_LEN, + send_mbox->wb_vaddr, send_mbox->wb_paddr); +} + +static int hinic3_mbox_pre_init(struct hinic3_hwdev *hwdev, + struct hinic3_mbox *mbox) +{ + mbox->hwdev = hwdev; + mutex_init(&mbox->mbox_send_lock); + spin_lock_init(&mbox->mbox_lock); + + mbox->workq = create_singlethread_workqueue(HINIC3_MBOX_WQ_NAME); + if (!mbox->workq) { + dev_err(hwdev->dev, "Failed to initialize MBOX workqueue\n"); + return -ENOMEM; + } + hwdev->mbox = mbox; + + return 0; +} + +int hinic3_init_mbox(struct hinic3_hwdev *hwdev) +{ + struct hinic3_mbox *mbox; + int err; + + mbox = kzalloc(sizeof(*mbox), GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + err = hinic3_mbox_pre_init(hwdev, mbox); + if (err) + goto err_free_mbox; + + err = init_mgmt_msg_channel(mbox); + if (err) + goto err_destroy_workqueue; + + err = hinic3_init_func_mbox_msg_channel(hwdev); + if (err) + goto err_uninit_mgmt_msg_ch; + + err = alloc_mbox_wb_status(mbox); + if (err) { + dev_err(hwdev->dev, "Failed to alloc mbox write back status\n"); + goto err_uninit_func_mbox_msg_ch; + } + + prepare_send_mbox(mbox); + + return 0; + +err_uninit_func_mbox_msg_ch: + hinic3_uninit_func_mbox_msg_channel(hwdev); + +err_uninit_mgmt_msg_ch: + uninit_mgmt_msg_channel(mbox); + +err_destroy_workqueue: + destroy_workqueue(mbox->workq); + +err_free_mbox: + kfree(mbox); + + return err; +} + +void hinic3_free_mbox(struct hinic3_hwdev *hwdev) +{ + struct hinic3_mbox *mbox = hwdev->mbox; + + destroy_workqueue(mbox->workq); + free_mbox_wb_status(mbox); + hinic3_uninit_func_mbox_msg_channel(hwdev); + uninit_mgmt_msg_channel(mbox); + kfree(mbox); +} + +#define MBOX_DMA_MSG_INIT_XOR_VAL 0x5a5a5a5a +#define MBOX_XOR_DATA_ALIGN 4 +static u32 mbox_dma_msg_xor(u32 *data, u32 msg_len) +{ + u32 xor = MBOX_DMA_MSG_INIT_XOR_VAL; + u32 dw_len = msg_len / sizeof(u32); + u32 i; + + for (i = 0; i < dw_len; i++) + xor ^= data[i]; + + return xor; +} + +#define MBOX_MQ_ID_MASK(mq, idx) ((idx) & ((mq)->depth - 1)) + +static bool is_msg_queue_full(struct mbox_dma_queue *mq) +{ + return MBOX_MQ_ID_MASK(mq, (mq)->prod_idx + 1) == + 
MBOX_MQ_ID_MASK(mq, (mq)->cons_idx); +} + +static int mbox_prepare_dma_entry(struct hinic3_mbox *mbox, + struct mbox_dma_queue *mq, + struct mbox_dma_msg *dma_msg, + const void *msg, u32 msg_len) +{ + u64 dma_addr, offset; + void *dma_vaddr; + + if (is_msg_queue_full(mq)) { + dev_err(mbox->hwdev->dev, "Mbox sync message queue is busy, pi: %u, ci: %u\n", + mq->prod_idx, MBOX_MQ_ID_MASK(mq, mq->cons_idx)); + return -EBUSY; + } + + /* copy data to DMA buffer */ + offset = mq->prod_idx * MBOX_MAX_BUF_SZ; + dma_vaddr = (u8 *)mq->dma_buf_vaddr + offset; + memcpy(dma_vaddr, msg, msg_len); + dma_addr = mq->dma_buf_paddr + offset; + dma_msg->dma_addr_high = cpu_to_le32(upper_32_bits(dma_addr)); + dma_msg->dma_addr_low = cpu_to_le32(lower_32_bits(dma_addr)); + dma_msg->msg_len = cpu_to_le32(msg_len); + /* The firmware obtains message based on 4B alignment. */ + dma_msg->xor = cpu_to_le32(mbox_dma_msg_xor(dma_vaddr, + ALIGN(msg_len, MBOX_XOR_DATA_ALIGN))); + mq->prod_idx++; + mq->prod_idx = MBOX_MQ_ID_MASK(mq, mq->prod_idx); + + return 0; +} + +static int mbox_prepare_dma_msg(struct hinic3_mbox *mbox, + enum mbox_msg_ack_type ack_type, + struct mbox_dma_msg *dma_msg, const void *msg, + u32 msg_len) +{ + struct mbox_dma_queue *mq; + u32 val; + + val = hinic3_hwif_read_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET); + if (ack_type == MBOX_MSG_ACK) { + mq = &mbox->sync_msg_queue; + mq->cons_idx = MBOX_MQ_CI_GET(val, SYNC); + } else { + mq = &mbox->async_msg_queue; + mq->cons_idx = MBOX_MQ_CI_GET(val, ASYNC); + } + + return mbox_prepare_dma_entry(mbox, mq, dma_msg, msg, msg_len); +} + +static void clear_mbox_status(struct hinic3_send_mbox *mbox) +{ + __be64 *wb_status = mbox->wb_vaddr; + + *wb_status = 0; + /* clear mailbox write back status */ + wmb(); +} + +static void mbox_dword_write(const void *src, void __iomem *dst, u32 count) +{ + const __le32 *src32 = src; + u32 __iomem *dst32 = dst; + u32 i; + + /* Data written to mbox is arranged in structs with little endian fields + * but when written to HW every dword (32bits) should be swapped since + * the HW will swap it again. 
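+ * The net effect is that the device ends up holding the same bytes as
+ * the source buffer once the hardware applies its own swap.
+ * __raw_writel() is used because it stores the value as-is, adding no
+ * further byte swapping of its own.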
+ */ + for (i = 0; i < count; i++) + __raw_writel(swab32((__force __u32)src32[i]), dst32 + i); +} + +static void mbox_copy_header(struct hinic3_hwdev *hwdev, + struct hinic3_send_mbox *mbox, __le64 *header) +{ + mbox_dword_write(header, mbox->data, MBOX_HEADER_SZ / sizeof(__le32)); +} + +static void mbox_copy_send_data(struct hinic3_hwdev *hwdev, + struct hinic3_send_mbox *mbox, void *seg, + u32 seg_len) +{ + u32 __iomem *dst = (u32 __iomem *)(mbox->data + MBOX_HEADER_SZ); + u32 count, leftover, last_dword; + const __le32 *src = seg; + + count = seg_len / sizeof(u32); + leftover = seg_len % sizeof(u32); + if (count > 0) + mbox_dword_write(src, dst, count); + + if (leftover > 0) { + last_dword = 0; + memcpy(&last_dword, src + count, leftover); + mbox_dword_write(&last_dword, dst + count, 1); + } +} + +static void write_mbox_msg_attr(struct hinic3_mbox *mbox, + u16 dst_func, u16 dst_aeqn, u32 seg_len) +{ + struct hinic3_hwif *hwif = mbox->hwdev->hwif; + u32 mbox_int, mbox_ctrl, tx_size; + + tx_size = ALIGN(seg_len + MBOX_HEADER_SZ, MBOX_SEG_LEN_ALIGN) >> 2; + + mbox_int = MBOX_INT_SET(dst_aeqn, DST_AEQN) | + MBOX_INT_SET(0, STAT_DMA) | + MBOX_INT_SET(tx_size, TX_SIZE) | + MBOX_INT_SET(0, STAT_DMA_SO_RO) | + MBOX_INT_SET(1, WB_EN); + + mbox_ctrl = MBOX_CTRL_SET(1, TX_STATUS) | + MBOX_CTRL_SET(0, TRIGGER_AEQE) | + MBOX_CTRL_SET(dst_func, DST_FUNC); + + hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_INT_OFF, mbox_int); + hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF, + mbox_ctrl); +} + +static u16 get_mbox_status(const struct hinic3_send_mbox *mbox) +{ + __be64 *wb_status = mbox->wb_vaddr; + u64 wb_val; + + wb_val = be64_to_cpu(*wb_status); + /* verify reading before check */ + rmb(); + + return wb_val & MBOX_WB_STATUS_ERRCODE_MASK; +} + +static enum hinic3_wait_return check_mbox_wb_status(void *priv_data) +{ + struct hinic3_mbox *mbox = priv_data; + u16 wb_status; + + wb_status = get_mbox_status(&mbox->send_mbox); + + return MBOX_STATUS_FINISHED(wb_status) ? + HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING; +} + +static int send_mbox_seg(struct hinic3_mbox *mbox, __le64 header, + u16 dst_func, void *seg, u32 seg_len, void *msg_info) +{ + struct hinic3_send_mbox *send_mbox = &mbox->send_mbox; + struct hinic3_hwdev *hwdev = mbox->hwdev; + u8 num_aeqs = hwdev->hwif->attr.num_aeqs; + enum mbox_msg_direction_type dir; + u16 dst_aeqn, wb_status, errcode; + int err; + + /* mbox to mgmt cpu, hardware doesn't care about dst aeq id */ + if (num_aeqs > MBOX_MSG_AEQ_FOR_MBOX) { + dir = MBOX_MSG_HEADER_GET(header, DIRECTION); + dst_aeqn = (dir == MBOX_MSG_SEND) ? + MBOX_MSG_AEQ_FOR_EVENT : MBOX_MSG_AEQ_FOR_MBOX; + } else { + dst_aeqn = 0; + } + + clear_mbox_status(send_mbox); + mbox_copy_header(hwdev, send_mbox, &header); + mbox_copy_send_data(hwdev, send_mbox, seg, seg_len); + write_mbox_msg_attr(mbox, dst_func, dst_aeqn, seg_len); + + err = hinic3_wait_for_timeout(mbox, check_mbox_wb_status, + MBOX_MSG_POLLING_TIMEOUT_MS, + USEC_PER_MSEC); + wb_status = get_mbox_status(send_mbox); + if (err) { + dev_err(hwdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n", + wb_status); + return err; + } + + if (!MBOX_STATUS_SUCCESS(wb_status)) { + dev_err(hwdev->dev, + "Send mailbox segment to function %u error, wb status: 0x%x\n", + dst_func, wb_status); + errcode = MBOX_STATUS_ERRCODE(wb_status); + return errcode ? 
errcode : -EFAULT; + } + + return 0; +} + +static int send_mbox_msg(struct hinic3_mbox *mbox, u8 mod, u16 cmd, + const void *msg, u32 msg_len, u16 dst_func, + enum mbox_msg_direction_type direction, + enum mbox_msg_ack_type ack_type, + struct mbox_msg_info *msg_info) +{ + enum mbox_msg_data_type data_type = MBOX_MSG_DATA_INLINE; + struct hinic3_hwdev *hwdev = mbox->hwdev; + struct mbox_dma_msg dma_msg; + u32 seg_len = MBOX_SEG_LEN; + __le64 header = 0; + u32 seq_id = 0; + u16 rsp_aeq_id; + u8 *msg_seg; + int err = 0; + u32 left; + + if (hwdev->hwif->attr.num_aeqs > MBOX_MSG_AEQ_FOR_MBOX) + rsp_aeq_id = MBOX_MSG_AEQ_FOR_MBOX; + else + rsp_aeq_id = 0; + + if (dst_func == MBOX_MGMT_FUNC_ID && + !(hwdev->features[0] & MBOX_COMM_F_MBOX_SEGMENT)) { + err = mbox_prepare_dma_msg(mbox, ack_type, &dma_msg, + msg, msg_len); + if (err) + goto err_send; + + msg = &dma_msg; + msg_len = sizeof(dma_msg); + data_type = MBOX_MSG_DATA_DMA; + } + + msg_seg = (u8 *)msg; + left = msg_len; + + header = cpu_to_le64(MBOX_MSG_HEADER_SET(msg_len, MSG_LEN) | + MBOX_MSG_HEADER_SET(mod, MODULE) | + MBOX_MSG_HEADER_SET(seg_len, SEG_LEN) | + MBOX_MSG_HEADER_SET(ack_type, NO_ACK) | + MBOX_MSG_HEADER_SET(data_type, DATA_TYPE) | + MBOX_MSG_HEADER_SET(MBOX_SEQ_ID_START_VAL, SEQID) | + MBOX_MSG_HEADER_SET(direction, DIRECTION) | + MBOX_MSG_HEADER_SET(cmd, CMD) | + MBOX_MSG_HEADER_SET(msg_info->msg_id, MSG_ID) | + MBOX_MSG_HEADER_SET(rsp_aeq_id, AEQ_ID) | + MBOX_MSG_HEADER_SET(MBOX_MSG_FROM_MBOX, SOURCE) | + MBOX_MSG_HEADER_SET(!!msg_info->status, STATUS)); + + while (!(MBOX_MSG_HEADER_GET(header, LAST))) { + if (left <= MBOX_SEG_LEN) { + header &= cpu_to_le64(~MBOX_MSG_HEADER_SEG_LEN_MASK); + header |= + cpu_to_le64(MBOX_MSG_HEADER_SET(left, SEG_LEN) | + MBOX_MSG_HEADER_SET(1, LAST)); + seg_len = left; + } + + err = send_mbox_seg(mbox, header, dst_func, msg_seg, + seg_len, msg_info); + if (err) { + dev_err(hwdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n", + MBOX_MSG_HEADER_GET(header, SEQID)); + goto err_send; + } + + left -= MBOX_SEG_LEN; + msg_seg += MBOX_SEG_LEN; + seq_id++; + header &= cpu_to_le64(~MBOX_MSG_HEADER_SEG_LEN_MASK); + header |= cpu_to_le64(MBOX_MSG_HEADER_SET(seq_id, SEQID)); + } + +err_send: + return err; +} + +static void set_mbox_to_func_event(struct hinic3_mbox *mbox, + enum mbox_event_state event_flag) +{ + spin_lock(&mbox->mbox_lock); + mbox->event_flag = event_flag; + spin_unlock(&mbox->mbox_lock); +} + +static enum hinic3_wait_return check_mbox_msg_finish(void *priv_data) +{ + struct hinic3_mbox *mbox = priv_data; + + return (mbox->event_flag == MBOX_EVENT_SUCCESS) ? + HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING; +} + +static int wait_mbox_msg_completion(struct hinic3_mbox *mbox, + u32 timeout) +{ + u32 wait_time; + int err; + + wait_time = (timeout != 0) ? timeout : MBOX_COMP_POLLING_TIMEOUT_MS; + err = hinic3_wait_for_timeout(mbox, check_mbox_msg_finish, + wait_time, USEC_PER_MSEC); + if (err) { + set_mbox_to_func_event(mbox, MBOX_EVENT_TIMEOUT); + return err; + } + set_mbox_to_func_event(mbox, MBOX_EVENT_END); + + return 0; +} + int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, const struct mgmt_msg_params *msg_params) { - /* Completed by later submission due to LoC limit. 
*/ - return -EFAULT; + struct hinic3_mbox *mbox = hwdev->mbox; + struct mbox_msg_info msg_info = {}; + struct hinic3_msg_desc *msg_desc; + u32 msg_len; + int err; + + /* expect response message */ + msg_desc = get_mbox_msg_desc(mbox, MBOX_MSG_RESP, MBOX_MGMT_FUNC_ID); + mutex_lock(&mbox->mbox_send_lock); + msg_info.msg_id = (mbox->send_msg_id + 1) & 0xF; + mbox->send_msg_id = msg_info.msg_id; + set_mbox_to_func_event(mbox, MBOX_EVENT_START); + + err = send_mbox_msg(mbox, mod, cmd, msg_params->buf_in, + msg_params->in_size, MBOX_MGMT_FUNC_ID, + MBOX_MSG_SEND, MBOX_MSG_ACK, &msg_info); + if (err) { + dev_err(hwdev->dev, "Send mailbox mod %u, cmd %u failed, msg_id: %u, err: %d\n", + mod, cmd, msg_info.msg_id, err); + set_mbox_to_func_event(mbox, MBOX_EVENT_FAIL); + goto err_send; + } + + if (wait_mbox_msg_completion(mbox, msg_params->timeout_ms)) { + dev_err(hwdev->dev, + "Send mbox msg timeout, msg_id: %u\n", msg_info.msg_id); + err = -ETIMEDOUT; + goto err_send; + } + + if (mod != msg_desc->mod || cmd != le16_to_cpu(msg_desc->cmd)) { + dev_err(hwdev->dev, + "Invalid response mbox message, mod: 0x%x, cmd: 0x%x, expect mod: 0x%x, cmd: 0x%x\n", + msg_desc->mod, msg_desc->cmd, mod, cmd); + err = -EFAULT; + goto err_send; + } + + if (msg_desc->msg_info.status) { + err = msg_desc->msg_info.status; + goto err_send; + } + + if (msg_params->buf_out) { + msg_len = le16_to_cpu(msg_desc->msg_len); + if (msg_len != msg_params->expected_out_size) { + dev_err(hwdev->dev, + "Invalid response mbox message length: %u for mod %d cmd %u, expected length: %u\n", + msg_desc->msg_len, mod, cmd, + msg_params->expected_out_size); + err = -EFAULT; + goto err_send; + } + + memcpy(msg_params->buf_out, msg_desc->msg, msg_len); + } + +err_send: + mutex_unlock(&mbox->mbox_send_lock); + + return err; +} + +int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, + const struct mgmt_msg_params *msg_params) +{ + struct hinic3_mbox *mbox = hwdev->mbox; + struct mbox_msg_info msg_info = {}; + int err; + + mutex_lock(&mbox->mbox_send_lock); + err = send_mbox_msg(mbox, mod, cmd, msg_params->buf_in, + msg_params->in_size, MBOX_MGMT_FUNC_ID, + MBOX_MSG_SEND, MBOX_MSG_NO_ACK, &msg_info); + if (err) + dev_err(hwdev->dev, "Send mailbox no ack failed\n"); + + mutex_unlock(&mbox->mbox_send_lock); + + return err; } diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h index d7a6c37b7eff..e71629e95086 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h @@ -8,8 +8,134 @@ #include <linux/mutex.h> struct hinic3_hwdev; +struct mgmt_msg_params; + +#define MBOX_MSG_HEADER_SRC_GLB_FUNC_IDX_MASK GENMASK_ULL(12, 0) +#define MBOX_MSG_HEADER_STATUS_MASK BIT_ULL(13) +#define MBOX_MSG_HEADER_SOURCE_MASK BIT_ULL(15) +#define MBOX_MSG_HEADER_AEQ_ID_MASK GENMASK_ULL(17, 16) +#define MBOX_MSG_HEADER_MSG_ID_MASK GENMASK_ULL(21, 18) +#define MBOX_MSG_HEADER_CMD_MASK GENMASK_ULL(31, 22) +#define MBOX_MSG_HEADER_MSG_LEN_MASK GENMASK_ULL(42, 32) +#define MBOX_MSG_HEADER_MODULE_MASK GENMASK_ULL(47, 43) +#define MBOX_MSG_HEADER_SEG_LEN_MASK GENMASK_ULL(53, 48) +#define MBOX_MSG_HEADER_NO_ACK_MASK BIT_ULL(54) +#define MBOX_MSG_HEADER_DATA_TYPE_MASK BIT_ULL(55) +#define MBOX_MSG_HEADER_SEQID_MASK GENMASK_ULL(61, 56) +#define MBOX_MSG_HEADER_LAST_MASK BIT_ULL(62) +#define MBOX_MSG_HEADER_DIRECTION_MASK BIT_ULL(63) + +#define MBOX_MSG_HEADER_SET(val, member) \ + FIELD_PREP(MBOX_MSG_HEADER_##member##_MASK, val) +#define 
MBOX_MSG_HEADER_GET(val, member) \ + FIELD_GET(MBOX_MSG_HEADER_##member##_MASK, le64_to_cpu(val)) + +/* identifies if a segment belongs to a message or to a response. A VF is only + * expected to send messages and receive responses. PF driver could receive + * messages and send responses. + */ +enum mbox_msg_direction_type { + MBOX_MSG_SEND = 0, + MBOX_MSG_RESP = 1, +}; + +/* Indicates if mbox message expects a response (ack) or not */ +enum mbox_msg_ack_type { + MBOX_MSG_ACK = 0, + MBOX_MSG_NO_ACK = 1, +}; + +enum mbox_msg_data_type { + MBOX_MSG_DATA_INLINE = 0, + MBOX_MSG_DATA_DMA = 1, +}; + +enum mbox_msg_src_type { + MBOX_MSG_FROM_MBOX = 1, +}; + +enum mbox_msg_aeq_type { + MBOX_MSG_AEQ_FOR_EVENT = 0, + MBOX_MSG_AEQ_FOR_MBOX = 1, +}; + +#define HINIC3_MBOX_WQ_NAME "hinic3_mbox" + +struct mbox_msg_info { + u8 msg_id; + u8 status; +}; + +struct hinic3_msg_desc { + u8 *msg; + __le16 msg_len; + u8 seq_id; + u8 mod; + __le16 cmd; + struct mbox_msg_info msg_info; +}; + +struct hinic3_msg_channel { + struct hinic3_msg_desc resp_msg; + struct hinic3_msg_desc recv_msg; +}; + +struct hinic3_send_mbox { + u8 __iomem *data; + void *wb_vaddr; + dma_addr_t wb_paddr; +}; + +enum mbox_event_state { + MBOX_EVENT_START = 0, + MBOX_EVENT_FAIL = 1, + MBOX_EVENT_SUCCESS = 2, + MBOX_EVENT_TIMEOUT = 3, + MBOX_EVENT_END = 4, +}; + +struct mbox_dma_msg { + __le32 xor; + __le32 dma_addr_high; + __le32 dma_addr_low; + __le32 msg_len; + __le64 rsvd; +}; + +struct mbox_dma_queue { + void *dma_buf_vaddr; + dma_addr_t dma_buf_paddr; + u16 depth; + u16 prod_idx; + u16 cons_idx; +}; + +struct hinic3_mbox { + struct hinic3_hwdev *hwdev; + /* lock for send mbox message and ack message */ + struct mutex mbox_send_lock; + struct hinic3_send_mbox send_mbox; + struct mbox_dma_queue sync_msg_queue; + struct mbox_dma_queue async_msg_queue; + struct workqueue_struct *workq; + /* driver and MGMT CPU */ + struct hinic3_msg_channel mgmt_msg; + /* VF to PF */ + struct hinic3_msg_channel *func_msg; + u8 send_msg_id; + enum mbox_event_state event_flag; + /* lock for mbox event flag */ + spinlock_t mbox_lock; +}; + +void hinic3_mbox_func_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header, + u8 size); +int hinic3_init_mbox(struct hinic3_hwdev *hwdev); +void hinic3_free_mbox(struct hinic3_hwdev *hwdev); int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, const struct mgmt_msg_params *msg_params); +int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, + const struct mgmt_msg_params *msg_params); #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c new file mode 100644 index 000000000000..c38d10cd7fac --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
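+/* Management-path glue: hinic3_mgmt_msg_aeqe_handler() forwards AEQ
+ * entries whose header SOURCE field marks them as mailbox traffic to
+ * hinic3_mbox_func_aeqe_handler(), and hinic3_flush_mgmt_workq() lets
+ * teardown paths wait for any pending AEQ work to finish.
+ */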
+ +#include "hinic3_eqs.h" +#include "hinic3_hwdev.h" +#include "hinic3_mbox.h" +#include "hinic3_mgmt.h" + +void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev) +{ + if (hwdev->aeqs) + flush_workqueue(hwdev->aeqs->workq); +} + +void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header, + u8 size) +{ + if (MBOX_MSG_HEADER_GET(*(__force __le64 *)header, SOURCE) == + MBOX_MSG_FROM_MBOX) + hinic3_mbox_func_aeqe_handler(hwdev, header, size); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h index 4edabeb32112..bbef3b32a6ec 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h @@ -9,5 +9,7 @@ struct hinic3_hwdev; void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev); +void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev, + u8 *header, u8 size); #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h index c4434efdc7f7..6cc0345c39e4 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h @@ -56,12 +56,105 @@ struct l2nic_cmd_update_mac { u8 new_mac[ETH_ALEN]; }; +struct l2nic_cmd_set_ci_attr { + struct mgmt_msg_head msg_head; + u16 func_idx; + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_idx; + u32 l2nic_sqn; + u32 rsvd; + u64 ci_addr; +}; + +struct l2nic_cmd_clear_qp_resource { + struct mgmt_msg_head msg_head; + u16 func_id; + u16 rsvd1; +}; + struct l2nic_cmd_force_pkt_drop { struct mgmt_msg_head msg_head; u8 port; u8 rsvd1[3]; }; +struct l2nic_cmd_set_vport_state { + struct mgmt_msg_head msg_head; + u16 func_id; + u16 rsvd1; + /* 0--disable, 1--enable */ + u8 state; + u8 rsvd2[3]; +}; + +struct l2nic_cmd_set_dcb_state { + struct mgmt_msg_head head; + u16 func_id; + /* 0 - get dcb state, 1 - set dcb state */ + u8 op_code; + /* 0 - disable, 1 - enable dcb */ + u8 state; + /* 0 - disable, 1 - enable dcb */ + u8 port_state; + u8 rsvd[7]; +}; + +#define L2NIC_RSS_TYPE_VALID_MASK BIT(23) +#define L2NIC_RSS_TYPE_TCP_IPV6_EXT_MASK BIT(24) +#define L2NIC_RSS_TYPE_IPV6_EXT_MASK BIT(25) +#define L2NIC_RSS_TYPE_TCP_IPV6_MASK BIT(26) +#define L2NIC_RSS_TYPE_IPV6_MASK BIT(27) +#define L2NIC_RSS_TYPE_TCP_IPV4_MASK BIT(28) +#define L2NIC_RSS_TYPE_IPV4_MASK BIT(29) +#define L2NIC_RSS_TYPE_UDP_IPV6_MASK BIT(30) +#define L2NIC_RSS_TYPE_UDP_IPV4_MASK BIT(31) +#define L2NIC_RSS_TYPE_SET(val, member) \ + FIELD_PREP(L2NIC_RSS_TYPE_##member##_MASK, val) +#define L2NIC_RSS_TYPE_GET(val, member) \ + FIELD_GET(L2NIC_RSS_TYPE_##member##_MASK, val) + +#define L2NIC_RSS_INDIR_SIZE 256 +#define L2NIC_RSS_KEY_SIZE 40 + +/* IEEE 802.1Qaz std */ +#define L2NIC_DCB_COS_MAX 0x8 + +struct l2nic_cmd_set_rss_ctx_tbl { + struct mgmt_msg_head msg_head; + u16 func_id; + u16 rsvd1; + u32 context; +}; + +struct l2nic_cmd_cfg_rss_engine { + struct mgmt_msg_head msg_head; + u16 func_id; + u8 opcode; + u8 hash_engine; + u8 rsvd1[4]; +}; + +struct l2nic_cmd_cfg_rss_hash_key { + struct mgmt_msg_head msg_head; + u16 func_id; + u8 opcode; + u8 rsvd1; + u8 key[L2NIC_RSS_KEY_SIZE]; +}; + +struct l2nic_cmd_cfg_rss { + struct mgmt_msg_head msg_head; + u16 func_id; + u8 rss_en; + u8 rq_priority_number; + u8 prio_tc[L2NIC_DCB_COS_MAX]; + u16 num_qps; + u16 rsvd1; +}; + /* Commands between NIC to fw */ enum l2nic_cmd { /* FUNC CFG */ @@ -82,6 +175,32 @@ enum l2nic_cmd { L2NIC_CMD_MAX = 256, }; 
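+/* RSS indirection table in the device's format: L2NIC_RSS_INDIR_SIZE
+ * (256) little-endian entries, each selecting the receive queue for
+ * one hash bucket. It is programmed through the cmdq opcode
+ * L2NIC_UCODE_CMD_SET_RSS_INDIR_TBL defined below.
+ */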
+struct l2nic_cmd_rss_set_indir_tbl { + __le32 rsvd[4]; + __le16 entry[L2NIC_RSS_INDIR_SIZE]; +}; + +/* NIC CMDQ MODE */ +enum l2nic_ucode_cmd { + L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX = 0, + L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX = 1, + L2NIC_UCODE_CMD_SET_RSS_INDIR_TBL = 4, +}; + +/* hilink mac group command */ +enum mag_cmd { + MAG_CMD_GET_LINK_STATUS = 7, +}; + +/* firmware also use this cmd report link event to driver */ +struct mag_cmd_get_link_status { + struct mgmt_msg_head head; + u8 port_id; + /* 0:link down 1:link up */ + u8 status; + u8 rsvd0[2]; +}; + enum hinic3_nic_feature_cap { HINIC3_NIC_F_CSUM = BIT(0), HINIC3_NIC_F_SCTP_CRC = BIT(1), diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c index 71104a6b8bef..0fa3c7900225 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c @@ -8,19 +8,437 @@ #include "hinic3_nic_cfg.h" #include "hinic3_nic_dev.h" #include "hinic3_nic_io.h" +#include "hinic3_rss.h" #include "hinic3_rx.h" #include "hinic3_tx.h" +/* try to modify the number of irq to the target number, + * and return the actual number of irq. + */ +static u16 hinic3_qp_irq_change(struct net_device *netdev, + u16 dst_num_qp_irq) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct msix_entry *qps_msix_entries; + u16 resp_irq_num, irq_num_gap, i; + u16 idx; + int err; + + qps_msix_entries = nic_dev->qps_msix_entries; + if (dst_num_qp_irq > nic_dev->num_qp_irq) { + irq_num_gap = dst_num_qp_irq - nic_dev->num_qp_irq; + err = hinic3_alloc_irqs(nic_dev->hwdev, irq_num_gap, + &qps_msix_entries[nic_dev->num_qp_irq], + &resp_irq_num); + if (err) { + netdev_err(netdev, "Failed to alloc irqs\n"); + return nic_dev->num_qp_irq; + } + + nic_dev->num_qp_irq += resp_irq_num; + } else if (dst_num_qp_irq < nic_dev->num_qp_irq) { + irq_num_gap = nic_dev->num_qp_irq - dst_num_qp_irq; + for (i = 0; i < irq_num_gap; i++) { + idx = (nic_dev->num_qp_irq - i) - 1; + hinic3_free_irq(nic_dev->hwdev, + qps_msix_entries[idx].vector); + qps_msix_entries[idx].vector = 0; + qps_msix_entries[idx].entry = 0; + } + nic_dev->num_qp_irq = dst_num_qp_irq; + } + + return nic_dev->num_qp_irq; +} + +static void hinic3_config_num_qps(struct net_device *netdev, + struct hinic3_dyna_txrxq_params *q_params) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u16 alloc_num_irq, cur_num_irq; + u16 dst_num_irq; + + if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) + q_params->num_qps = 1; + + if (nic_dev->num_qp_irq >= q_params->num_qps) + goto out; + + cur_num_irq = nic_dev->num_qp_irq; + + alloc_num_irq = hinic3_qp_irq_change(netdev, q_params->num_qps); + if (alloc_num_irq < q_params->num_qps) { + q_params->num_qps = alloc_num_irq; + netdev_warn(netdev, "Can not get enough irqs, adjust num_qps to %u\n", + q_params->num_qps); + + /* The current irq may be in use, we must keep it */ + dst_num_irq = max_t(u16, cur_num_irq, q_params->num_qps); + hinic3_qp_irq_change(netdev, dst_num_irq); + } + +out: + netdev_dbg(netdev, "No need to change irqs, num_qps is %u\n", + q_params->num_qps); +} + +static int hinic3_setup_num_qps(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + nic_dev->num_qp_irq = 0; + + nic_dev->qps_msix_entries = kcalloc(nic_dev->max_qps, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!nic_dev->qps_msix_entries) + return -ENOMEM; + + hinic3_config_num_qps(netdev, &nic_dev->q_params); + + return 0; +} + +static 
void hinic3_destroy_num_qps(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u16 i; + + for (i = 0; i < nic_dev->num_qp_irq; i++) + hinic3_free_irq(nic_dev->hwdev, + nic_dev->qps_msix_entries[i].vector); + + kfree(nic_dev->qps_msix_entries); +} + +static int hinic3_alloc_txrxq_resources(struct net_device *netdev, + struct hinic3_dyna_txrxq_params *q_params) +{ + int err; + + q_params->txqs_res = kcalloc(q_params->num_qps, + sizeof(*q_params->txqs_res), GFP_KERNEL); + if (!q_params->txqs_res) + return -ENOMEM; + + q_params->rxqs_res = kcalloc(q_params->num_qps, + sizeof(*q_params->rxqs_res), GFP_KERNEL); + if (!q_params->rxqs_res) { + err = -ENOMEM; + goto err_free_txqs_res_arr; + } + + q_params->irq_cfg = kcalloc(q_params->num_qps, + sizeof(*q_params->irq_cfg), GFP_KERNEL); + if (!q_params->irq_cfg) { + err = -ENOMEM; + goto err_free_rxqs_res_arr; + } + + err = hinic3_alloc_txqs_res(netdev, q_params->num_qps, + q_params->sq_depth, q_params->txqs_res); + if (err) { + netdev_err(netdev, "Failed to alloc txqs resource\n"); + goto err_free_irq_cfg; + } + + err = hinic3_alloc_rxqs_res(netdev, q_params->num_qps, + q_params->rq_depth, q_params->rxqs_res); + if (err) { + netdev_err(netdev, "Failed to alloc rxqs resource\n"); + goto err_free_txqs_res; + } + + return 0; + +err_free_txqs_res: + hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth, + q_params->txqs_res); +err_free_irq_cfg: + kfree(q_params->irq_cfg); + q_params->irq_cfg = NULL; +err_free_rxqs_res_arr: + kfree(q_params->rxqs_res); + q_params->rxqs_res = NULL; +err_free_txqs_res_arr: + kfree(q_params->txqs_res); + q_params->txqs_res = NULL; + + return err; +} + +static void hinic3_free_txrxq_resources(struct net_device *netdev, + struct hinic3_dyna_txrxq_params *q_params) +{ + hinic3_free_rxqs_res(netdev, q_params->num_qps, q_params->rq_depth, + q_params->rxqs_res); + hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth, + q_params->txqs_res); + + kfree(q_params->irq_cfg); + q_params->irq_cfg = NULL; + + kfree(q_params->rxqs_res); + q_params->rxqs_res = NULL; + + kfree(q_params->txqs_res); + q_params->txqs_res = NULL; +} + +static int hinic3_configure_txrxqs(struct net_device *netdev, + struct hinic3_dyna_txrxq_params *q_params) +{ + int err; + + err = hinic3_configure_txqs(netdev, q_params->num_qps, + q_params->sq_depth, q_params->txqs_res); + if (err) { + netdev_err(netdev, "Failed to configure txqs\n"); + return err; + } + + err = hinic3_configure_rxqs(netdev, q_params->num_qps, + q_params->rq_depth, q_params->rxqs_res); + if (err) { + netdev_err(netdev, "Failed to configure rxqs\n"); + return err; + } + + return 0; +} + +static int hinic3_configure(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + netdev->min_mtu = HINIC3_MIN_MTU_SIZE; + netdev->max_mtu = HINIC3_MAX_JUMBO_FRAME_SIZE; + err = hinic3_set_port_mtu(netdev, netdev->mtu); + if (err) { + netdev_err(netdev, "Failed to set mtu\n"); + return err; + } + + /* Ensure DCB is disabled */ + hinic3_sync_dcb_state(nic_dev->hwdev, 1, 0); + + if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) { + err = hinic3_rss_init(netdev); + if (err) { + netdev_err(netdev, "Failed to init rss\n"); + return err; + } + } + + return 0; +} + +static void hinic3_remove_configure(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) + hinic3_rss_uninit(netdev); +} + +static int 
hinic3_alloc_channel_resources(struct net_device *netdev, + struct hinic3_dyna_qp_params *qp_params, + struct hinic3_dyna_txrxq_params *trxq_params) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + qp_params->num_qps = trxq_params->num_qps; + qp_params->sq_depth = trxq_params->sq_depth; + qp_params->rq_depth = trxq_params->rq_depth; + + err = hinic3_alloc_qps(nic_dev, qp_params); + if (err) { + netdev_err(netdev, "Failed to alloc qps\n"); + return err; + } + + err = hinic3_alloc_txrxq_resources(netdev, trxq_params); + if (err) { + netdev_err(netdev, "Failed to alloc txrxq resources\n"); + hinic3_free_qps(nic_dev, qp_params); + return err; + } + + return 0; +} + +static void hinic3_free_channel_resources(struct net_device *netdev, + struct hinic3_dyna_qp_params *qp_params, + struct hinic3_dyna_txrxq_params *trxq_params) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + hinic3_free_txrxq_resources(netdev, trxq_params); + hinic3_free_qps(nic_dev, qp_params); +} + +static int hinic3_open_channel(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + err = hinic3_init_qp_ctxts(nic_dev); + if (err) { + netdev_err(netdev, "Failed to init qps\n"); + return err; + } + + err = hinic3_configure_txrxqs(netdev, &nic_dev->q_params); + if (err) { + netdev_err(netdev, "Failed to configure txrxqs\n"); + goto err_free_qp_ctxts; + } + + err = hinic3_qps_irq_init(netdev); + if (err) { + netdev_err(netdev, "Failed to init txrxq irq\n"); + goto err_free_qp_ctxts; + } + + err = hinic3_configure(netdev); + if (err) { + netdev_err(netdev, "Failed to init txrxq irq\n"); + goto err_uninit_qps_irq; + } + + return 0; + +err_uninit_qps_irq: + hinic3_qps_irq_uninit(netdev); +err_free_qp_ctxts: + hinic3_free_qp_ctxts(nic_dev); + + return err; +} + +static void hinic3_close_channel(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + hinic3_remove_configure(netdev); + hinic3_qps_irq_uninit(netdev); + hinic3_free_qp_ctxts(nic_dev); +} + +static int hinic3_vport_up(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + bool link_status_up; + u16 glb_func_id; + int err; + + glb_func_id = hinic3_global_func_id(nic_dev->hwdev); + err = hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, true); + if (err) { + netdev_err(netdev, "Failed to enable vport\n"); + goto err_flush_qps_res; + } + + err = netif_set_real_num_queues(netdev, nic_dev->q_params.num_qps, + nic_dev->q_params.num_qps); + if (err) { + netdev_err(netdev, "Failed to set real number of queues\n"); + goto err_flush_qps_res; + } + netif_tx_start_all_queues(netdev); + + err = hinic3_get_link_status(nic_dev->hwdev, &link_status_up); + if (!err && link_status_up) + netif_carrier_on(netdev); + + return 0; + +err_flush_qps_res: + hinic3_flush_qps_res(nic_dev->hwdev); + /* wait to guarantee that no packets will be sent to host */ + msleep(100); + + return err; +} + +static void hinic3_vport_down(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u16 glb_func_id; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + glb_func_id = hinic3_global_func_id(nic_dev->hwdev); + hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, false); + + hinic3_flush_txqs(netdev); + /* wait to guarantee that no packets will be sent to host */ + msleep(100); + hinic3_flush_qps_res(nic_dev->hwdev); +} + static int hinic3_open(struct net_device *netdev) { - /* Completed by later submission due to LoC 
limit. */ - return -EFAULT; + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_dyna_qp_params qp_params; + int err; + + err = hinic3_init_nicio_res(nic_dev); + if (err) { + netdev_err(netdev, "Failed to init nicio resources\n"); + return err; + } + + err = hinic3_setup_num_qps(netdev); + if (err) { + netdev_err(netdev, "Failed to setup num_qps\n"); + goto err_free_nicio_res; + } + + err = hinic3_alloc_channel_resources(netdev, &qp_params, + &nic_dev->q_params); + if (err) + goto err_destroy_num_qps; + + hinic3_init_qps(nic_dev, &qp_params); + + err = hinic3_open_channel(netdev); + if (err) + goto err_uninit_qps; + + err = hinic3_vport_up(netdev); + if (err) + goto err_close_channel; + + return 0; + +err_close_channel: + hinic3_close_channel(netdev); +err_uninit_qps: + hinic3_uninit_qps(nic_dev, &qp_params); + hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params); +err_destroy_num_qps: + hinic3_destroy_num_qps(netdev); +err_free_nicio_res: + hinic3_free_nicio_res(nic_dev); + + return err; } static int hinic3_close(struct net_device *netdev) { - /* Completed by later submission due to LoC limit. */ - return -EFAULT; + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_dyna_qp_params qp_params; + + hinic3_vport_down(netdev); + hinic3_close_channel(netdev); + hinic3_uninit_qps(nic_dev, &qp_params); + hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params); + + return 0; } static int hinic3_change_mtu(struct net_device *netdev, int new_mtu) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c index 5b1a91a18c67..979f47ca77f9 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c @@ -39,6 +39,12 @@ static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode, return 0; } +int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev) +{ + return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_GET, + &nic_dev->nic_io->feature_cap, 1); +} + int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev) { return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_SET, @@ -82,6 +88,23 @@ static int hinic3_set_function_table(struct hinic3_hwdev *hwdev, u32 cfg_bitmap, return 0; } +int hinic3_init_function_table(struct hinic3_nic_dev *nic_dev) +{ + struct hinic3_nic_io *nic_io = nic_dev->nic_io; + struct l2nic_func_tbl_cfg func_tbl_cfg = {}; + u32 cfg_bitmap; + + func_tbl_cfg.mtu = 0x3FFF; /* default, max mtu */ + func_tbl_cfg.rx_wqe_buf_size = nic_io->rx_buf_len; + + cfg_bitmap = BIT(L2NIC_FUNC_TBL_CFG_INIT) | + BIT(L2NIC_FUNC_TBL_CFG_MTU) | + BIT(L2NIC_FUNC_TBL_CFG_RX_BUF_SIZE); + + return hinic3_set_function_table(nic_dev->hwdev, cfg_bitmap, + &func_tbl_cfg); +} + int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu) { struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); @@ -89,6 +112,7 @@ int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu) struct hinic3_hwdev *hwdev = nic_dev->hwdev; func_tbl_cfg.mtu = new_mtu; + return hinic3_set_function_table(hwdev, BIT(L2NIC_FUNC_TBL_CFG_MTU), &func_tbl_cfg); } @@ -206,6 +230,63 @@ int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac, err, mac_info.msg_head.status); return -EIO; } + + return 0; +} + +int hinic3_set_ci_table(struct hinic3_hwdev *hwdev, struct hinic3_sq_attr *attr) +{ + struct l2nic_cmd_set_ci_attr cons_idx_attr = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + 
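+ /* Fill the SQ completion record attributes: the device DMAs the
+ * committed consumer index to ci_addr (ci_dma_base), so TX cleanup
+ * can read completion progress from host memory instead of polling
+ * a device register.
+ */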
cons_idx_attr.func_idx = hinic3_global_func_id(hwdev); + cons_idx_attr.dma_attr_off = attr->dma_attr_off; + cons_idx_attr.pending_limit = attr->pending_limit; + cons_idx_attr.coalescing_time = attr->coalescing_time; + + if (attr->intr_en) { + cons_idx_attr.intr_en = attr->intr_en; + cons_idx_attr.intr_idx = attr->intr_idx; + } + + cons_idx_attr.l2nic_sqn = attr->l2nic_sqn; + cons_idx_attr.ci_addr = attr->ci_dma_base; + + mgmt_msg_params_init_default(&msg_params, &cons_idx_attr, + sizeof(cons_idx_attr)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_SET_SQ_CI_ATTR, &msg_params); + if (err || cons_idx_attr.msg_head.status) { + dev_err(hwdev->dev, + "Failed to set ci attribute table, err: %d, status: 0x%x\n", + err, cons_idx_attr.msg_head.status); + return -EFAULT; + } + + return 0; +} + +int hinic3_flush_qps_res(struct hinic3_hwdev *hwdev) +{ + struct l2nic_cmd_clear_qp_resource sq_res = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + sq_res.func_id = hinic3_global_func_id(hwdev); + + mgmt_msg_params_init_default(&msg_params, &sq_res, sizeof(sq_res)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_CLEAR_QP_RESOURCE, + &msg_params); + if (err || sq_res.msg_head.status) { + dev_err(hwdev->dev, "Failed to clear sq resources, err: %d, status: 0x%x\n", + err, sq_res.msg_head.status); + return -EINVAL; + } + return 0; } @@ -231,3 +312,74 @@ int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev) return pkt_drop.msg_head.status; } + +int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state) +{ + struct l2nic_cmd_set_dcb_state dcb_state = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + dcb_state.op_code = op_code; + dcb_state.state = state; + dcb_state.func_id = hinic3_global_func_id(hwdev); + + mgmt_msg_params_init_default(&msg_params, &dcb_state, + sizeof(dcb_state)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_QOS_DCB_STATE, &msg_params); + if (err || dcb_state.head.status) { + dev_err(hwdev->dev, + "Failed to set dcb state, err: %d, status: 0x%x\n", + err, dcb_state.head.status); + return -EFAULT; + } + + return 0; +} + +int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up) +{ + struct mag_cmd_get_link_status get_link = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + get_link.port_id = hinic3_physical_port_id(hwdev); + + mgmt_msg_params_init_default(&msg_params, &get_link, sizeof(get_link)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_HILINK, + MAG_CMD_GET_LINK_STATUS, &msg_params); + if (err || get_link.head.status) { + dev_err(hwdev->dev, "Failed to get link state, err: %d, status: 0x%x\n", + err, get_link.head.status); + return -EIO; + } + + *link_status_up = !!get_link.status; + + return 0; +} + +int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id, + bool enable) +{ + struct l2nic_cmd_set_vport_state en_state = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + en_state.func_id = func_id; + en_state.state = enable ? 
1 : 0; + + mgmt_msg_params_init_default(&msg_params, &en_state, sizeof(en_state)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_SET_VPORT_ENABLE, &msg_params); + if (err || en_state.msg_head.status) { + dev_err(hwdev->dev, "Failed to set vport state, err: %d, status: 0x%x\n", + err, en_state.msg_head.status); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h index bf9ce51dc401..b83b567fa542 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h @@ -22,11 +22,23 @@ enum hinic3_nic_event_type { HINIC3_NIC_EVENT_LINK_UP = 1, }; +struct hinic3_sq_attr { + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_idx; + u32 l2nic_sqn; + u64 ci_dma_base; +}; + +int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev); int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev); bool hinic3_test_support(struct hinic3_nic_dev *nic_dev, enum hinic3_nic_feature_cap feature_bits); void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap); +int hinic3_init_function_table(struct hinic3_nic_dev *nic_dev); int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu); int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, @@ -36,6 +48,14 @@ int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac, u8 *new_mac, u16 vlan_id, u16 func_id); +int hinic3_set_ci_table(struct hinic3_hwdev *hwdev, + struct hinic3_sq_attr *attr); +int hinic3_flush_qps_res(struct hinic3_hwdev *hwdev); int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev); +int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state); +int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up); +int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id, + bool enable); + #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h index c994fc9b6ee0..5ba83261616c 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h @@ -51,6 +51,12 @@ struct hinic3_dyna_txrxq_params { struct hinic3_irq_cfg *irq_cfg; }; +struct hinic3_intr_coal_info { + u8 pending_limit; + u8 coalesce_timer_cfg; + u8 resend_timer_cfg; +}; + struct hinic3_nic_dev { struct pci_dev *pdev; struct net_device *netdev; @@ -67,16 +73,21 @@ struct hinic3_nic_dev { struct hinic3_txq *txqs; struct hinic3_rxq *rxqs; + enum hinic3_rss_hash_type rss_hash_type; + struct hinic3_rss_type rss_type; + u8 *rss_hkey; + u16 *rss_indir; + u16 num_qp_irq; struct msix_entry *qps_msix_entries; + struct hinic3_intr_coal_info *intr_coalesce; + bool link_status_up; }; void hinic3_set_netdev_ops(struct net_device *netdev); - -/* Temporary prototypes. Functions become static in later submission. 
*/ -void qp_add_napi(struct hinic3_irq_cfg *irq_cfg); -void qp_del_napi(struct hinic3_irq_cfg *irq_cfg); +int hinic3_qps_irq_init(struct net_device *netdev); +void hinic3_qps_irq_uninit(struct net_device *netdev); #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c index 34a1f5bd5ac1..d86cd1ba4605 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. +#include "hinic3_cmdq.h" #include "hinic3_hw_comm.h" #include "hinic3_hw_intf.h" #include "hinic3_hwdev.h" @@ -9,13 +10,876 @@ #include "hinic3_nic_dev.h" #include "hinic3_nic_io.h" +#define HINIC3_DEFAULT_TX_CI_PENDING_LIMIT 1 +#define HINIC3_DEFAULT_TX_CI_COALESCING_TIME 1 +#define HINIC3_DEFAULT_DROP_THD_ON (0xFFFF) +#define HINIC3_DEFAULT_DROP_THD_OFF 0 + +#define HINIC3_CI_Q_ADDR_SIZE (64) + +#define HINIC3_CI_TABLE_SIZE(num_qps) \ + (ALIGN((num_qps) * HINIC3_CI_Q_ADDR_SIZE, HINIC3_MIN_PAGE_SIZE)) + +#define HINIC3_CI_VADDR(base_addr, q_id) \ + ((u8 *)(base_addr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE) + +#define HINIC3_CI_PADDR(base_paddr, q_id) \ + ((base_paddr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE) + +#define SQ_WQ_PREFETCH_MAX 1 +#define SQ_WQ_PREFETCH_MIN 1 +#define SQ_WQ_PREFETCH_THRESHOLD 16 + +#define RQ_WQ_PREFETCH_MAX 4 +#define RQ_WQ_PREFETCH_MIN 1 +#define RQ_WQ_PREFETCH_THRESHOLD 256 + +/* (2048 - 8) / 64 */ +#define HINIC3_Q_CTXT_MAX 31 + +enum hinic3_qp_ctxt_type { + HINIC3_QP_CTXT_TYPE_SQ = 0, + HINIC3_QP_CTXT_TYPE_RQ = 1, +}; + +struct hinic3_qp_ctxt_hdr { + __le16 num_queues; + __le16 queue_type; + __le16 start_qid; + __le16 rsvd; +}; + +struct hinic3_sq_ctxt { + __le32 ci_pi; + __le32 drop_mode_sp; + __le32 wq_pfn_hi_owner; + __le32 wq_pfn_lo; + + __le32 rsvd0; + __le32 pkt_drop_thd; + __le32 global_sq_id; + __le32 vlan_ceq_attr; + + __le32 pref_cache; + __le32 pref_ci_owner; + __le32 pref_wq_pfn_hi_ci; + __le32 pref_wq_pfn_lo; + + __le32 rsvd8; + __le32 rsvd9; + __le32 wq_block_pfn_hi; + __le32 wq_block_pfn_lo; +}; + +struct hinic3_rq_ctxt { + __le32 ci_pi; + __le32 ceq_attr; + __le32 wq_pfn_hi_type_owner; + __le32 wq_pfn_lo; + + __le32 rsvd[3]; + __le32 cqe_sge_len; + + __le32 pref_cache; + __le32 pref_ci_owner; + __le32 pref_wq_pfn_hi_ci; + __le32 pref_wq_pfn_lo; + + __le32 pi_paddr_hi; + __le32 pi_paddr_lo; + __le32 wq_block_pfn_hi; + __le32 wq_block_pfn_lo; +}; + +struct hinic3_sq_ctxt_block { + struct hinic3_qp_ctxt_hdr cmdq_hdr; + struct hinic3_sq_ctxt sq_ctxt[HINIC3_Q_CTXT_MAX]; +}; + +struct hinic3_rq_ctxt_block { + struct hinic3_qp_ctxt_hdr cmdq_hdr; + struct hinic3_rq_ctxt rq_ctxt[HINIC3_Q_CTXT_MAX]; +}; + +struct hinic3_clean_queue_ctxt { + struct hinic3_qp_ctxt_hdr cmdq_hdr; + __le32 rsvd; +}; + +#define SQ_CTXT_SIZE(num_sqs) \ + (sizeof(struct hinic3_qp_ctxt_hdr) + \ + (num_sqs) * sizeof(struct hinic3_sq_ctxt)) + +#define RQ_CTXT_SIZE(num_rqs) \ + (sizeof(struct hinic3_qp_ctxt_hdr) + \ + (num_rqs) * sizeof(struct hinic3_rq_ctxt)) + +#define SQ_CTXT_PREF_CI_HI_SHIFT 12 +#define SQ_CTXT_PREF_CI_HI(val) ((val) >> SQ_CTXT_PREF_CI_HI_SHIFT) + +#define SQ_CTXT_PI_IDX_MASK GENMASK(15, 0) +#define SQ_CTXT_CI_IDX_MASK GENMASK(31, 16) +#define SQ_CTXT_CI_PI_SET(val, member) \ + FIELD_PREP(SQ_CTXT_##member##_MASK, val) + +#define SQ_CTXT_MODE_SP_FLAG_MASK BIT(0) +#define SQ_CTXT_MODE_PKT_DROP_MASK BIT(1) +#define SQ_CTXT_MODE_SET(val, member) \ + 
FIELD_PREP(SQ_CTXT_MODE_##member##_MASK, val) + +#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK GENMASK(19, 0) +#define SQ_CTXT_WQ_PAGE_OWNER_MASK BIT(23) +#define SQ_CTXT_WQ_PAGE_SET(val, member) \ + FIELD_PREP(SQ_CTXT_WQ_PAGE_##member##_MASK, val) + +#define SQ_CTXT_PKT_DROP_THD_ON_MASK GENMASK(15, 0) +#define SQ_CTXT_PKT_DROP_THD_OFF_MASK GENMASK(31, 16) +#define SQ_CTXT_PKT_DROP_THD_SET(val, member) \ + FIELD_PREP(SQ_CTXT_PKT_DROP_##member##_MASK, val) + +#define SQ_CTXT_GLOBAL_SQ_ID_MASK GENMASK(12, 0) +#define SQ_CTXT_GLOBAL_QUEUE_ID_SET(val, member) \ + FIELD_PREP(SQ_CTXT_##member##_MASK, val) + +#define SQ_CTXT_VLAN_INSERT_MODE_MASK GENMASK(20, 19) +#define SQ_CTXT_VLAN_CEQ_EN_MASK BIT(23) +#define SQ_CTXT_VLAN_CEQ_SET(val, member) \ + FIELD_PREP(SQ_CTXT_VLAN_##member##_MASK, val) + +#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK GENMASK(13, 0) +#define SQ_CTXT_PREF_CACHE_MAX_MASK GENMASK(24, 14) +#define SQ_CTXT_PREF_CACHE_MIN_MASK GENMASK(31, 25) + +#define SQ_CTXT_PREF_CI_HI_MASK GENMASK(3, 0) +#define SQ_CTXT_PREF_OWNER_MASK BIT(4) + +#define SQ_CTXT_PREF_WQ_PFN_HI_MASK GENMASK(19, 0) +#define SQ_CTXT_PREF_CI_LOW_MASK GENMASK(31, 20) +#define SQ_CTXT_PREF_SET(val, member) \ + FIELD_PREP(SQ_CTXT_PREF_##member##_MASK, val) + +#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK GENMASK(22, 0) +#define SQ_CTXT_WQ_BLOCK_SET(val, member) \ + FIELD_PREP(SQ_CTXT_WQ_BLOCK_##member##_MASK, val) + +#define RQ_CTXT_PI_IDX_MASK GENMASK(15, 0) +#define RQ_CTXT_CI_IDX_MASK GENMASK(31, 16) +#define RQ_CTXT_CI_PI_SET(val, member) \ + FIELD_PREP(RQ_CTXT_##member##_MASK, val) + +#define RQ_CTXT_CEQ_ATTR_INTR_MASK GENMASK(30, 21) +#define RQ_CTXT_CEQ_ATTR_EN_MASK BIT(31) +#define RQ_CTXT_CEQ_ATTR_SET(val, member) \ + FIELD_PREP(RQ_CTXT_CEQ_ATTR_##member##_MASK, val) + +#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK GENMASK(19, 0) +#define RQ_CTXT_WQ_PAGE_WQE_TYPE_MASK GENMASK(29, 28) +#define RQ_CTXT_WQ_PAGE_OWNER_MASK BIT(31) +#define RQ_CTXT_WQ_PAGE_SET(val, member) \ + FIELD_PREP(RQ_CTXT_WQ_PAGE_##member##_MASK, val) + +#define RQ_CTXT_CQE_LEN_MASK GENMASK(29, 28) +#define RQ_CTXT_CQE_LEN_SET(val, member) \ + FIELD_PREP(RQ_CTXT_##member##_MASK, val) + +#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK GENMASK(13, 0) +#define RQ_CTXT_PREF_CACHE_MAX_MASK GENMASK(24, 14) +#define RQ_CTXT_PREF_CACHE_MIN_MASK GENMASK(31, 25) + +#define RQ_CTXT_PREF_CI_HI_MASK GENMASK(3, 0) +#define RQ_CTXT_PREF_OWNER_MASK BIT(4) + +#define RQ_CTXT_PREF_WQ_PFN_HI_MASK GENMASK(19, 0) +#define RQ_CTXT_PREF_CI_LOW_MASK GENMASK(31, 20) +#define RQ_CTXT_PREF_SET(val, member) \ + FIELD_PREP(RQ_CTXT_PREF_##member##_MASK, val) + +#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK GENMASK(22, 0) +#define RQ_CTXT_WQ_BLOCK_SET(val, member) \ + FIELD_PREP(RQ_CTXT_WQ_BLOCK_##member##_MASK, val) + +#define WQ_PAGE_PFN_SHIFT 12 +#define WQ_BLOCK_PFN_SHIFT 9 +#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT) +#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT) + int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev) { - /* Completed by later submission due to LoC limit. 
*/ - return -EFAULT; + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + struct hinic3_nic_io *nic_io; + int err; + + nic_io = kzalloc(sizeof(*nic_io), GFP_KERNEL); + if (!nic_io) + return -ENOMEM; + + nic_dev->nic_io = nic_io; + + err = hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_NIC, 1); + if (err) { + dev_err(hwdev->dev, "Failed to set function svc used state\n"); + goto err_free_nicio; + } + + err = hinic3_init_function_table(nic_dev); + if (err) { + dev_err(hwdev->dev, "Failed to init function table\n"); + goto err_clear_func_svc_used_state; + } + + nic_io->rx_buf_len = nic_dev->rx_buf_len; + + err = hinic3_get_nic_feature_from_hw(nic_dev); + if (err) { + dev_err(hwdev->dev, "Failed to get nic features\n"); + goto err_clear_func_svc_used_state; + } + + nic_io->feature_cap &= HINIC3_NIC_F_ALL_MASK; + nic_io->feature_cap &= HINIC3_NIC_DRV_DEFAULT_FEATURE; + dev_dbg(hwdev->dev, "nic features: 0x%llx\n", nic_io->feature_cap); + + return 0; + +err_clear_func_svc_used_state: + hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_NIC, 0); +err_free_nicio: + nic_dev->nic_io = NULL; + kfree(nic_io); + + return err; } void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev) { - /* Completed by later submission due to LoC limit. */ + struct hinic3_nic_io *nic_io = nic_dev->nic_io; + + hinic3_set_func_svc_used_state(nic_dev->hwdev, COMM_FUNC_SVC_T_NIC, 0); + nic_dev->nic_io = NULL; + kfree(nic_io); +} + +int hinic3_init_nicio_res(struct hinic3_nic_dev *nic_dev) +{ + struct hinic3_nic_io *nic_io = nic_dev->nic_io; + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + void __iomem *db_base; + int err; + + nic_io->max_qps = hinic3_func_max_qnum(hwdev); + + err = hinic3_alloc_db_addr(hwdev, &db_base, NULL); + if (err) { + dev_err(hwdev->dev, "Failed to allocate doorbell for sqs\n"); + return err; + } + nic_io->sqs_db_addr = db_base; + + err = hinic3_alloc_db_addr(hwdev, &db_base, NULL); + if (err) { + hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr); + dev_err(hwdev->dev, "Failed to allocate doorbell for rqs\n"); + return err; + } + nic_io->rqs_db_addr = db_base; + + nic_io->ci_vaddr_base = + dma_alloc_coherent(hwdev->dev, + HINIC3_CI_TABLE_SIZE(nic_io->max_qps), + &nic_io->ci_dma_base, + GFP_KERNEL); + if (!nic_io->ci_vaddr_base) { + hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr); + hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr); + return -ENOMEM; + } + + return 0; +} + +void hinic3_free_nicio_res(struct hinic3_nic_dev *nic_dev) +{ + struct hinic3_nic_io *nic_io = nic_dev->nic_io; + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + + dma_free_coherent(hwdev->dev, + HINIC3_CI_TABLE_SIZE(nic_io->max_qps), + nic_io->ci_vaddr_base, nic_io->ci_dma_base); + + hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr); + hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr); +} + +static int hinic3_create_sq(struct hinic3_hwdev *hwdev, + struct hinic3_io_queue *sq, + u16 q_id, u32 sq_depth, u16 sq_msix_idx) +{ + int err; + + /* mark SQ as used; hardware requires the initial owner bit to be 1 */ + sq->owner = 1; + + sq->q_id = q_id; + sq->msix_entry_idx = sq_msix_idx; + + err = hinic3_wq_create(hwdev, &sq->wq, sq_depth, + BIT(HINIC3_SQ_WQEBB_SHIFT)); + if (err) { + dev_err(hwdev->dev, "Failed to create tx queue %u wq\n", + q_id); + return err; + } + + return 0; +} + +static int hinic3_create_rq(struct hinic3_hwdev *hwdev, + struct hinic3_io_queue *rq, + u16 q_id, u32 rq_depth, u16 rq_msix_idx) +{ + int err; + + rq->q_id = q_id; + rq->msix_entry_idx = rq_msix_idx; + + err = hinic3_wq_create(hwdev, &rq->wq, rq_depth, + BIT(HINIC3_RQ_WQEBB_SHIFT + + 
HINIC3_NORMAL_RQ_WQE)); + if (err) { + dev_err(hwdev->dev, "Failed to create rx queue %u wq\n", + q_id); + return err; + } + + return 0; +} + +static int hinic3_create_qp(struct hinic3_hwdev *hwdev, + struct hinic3_io_queue *sq, + struct hinic3_io_queue *rq, u16 q_id, u32 sq_depth, + u32 rq_depth, u16 qp_msix_idx) +{ + int err; + + err = hinic3_create_sq(hwdev, sq, q_id, sq_depth, qp_msix_idx); + if (err) { + dev_err(hwdev->dev, "Failed to create sq, qid: %u\n", + q_id); + return err; + } + + err = hinic3_create_rq(hwdev, rq, q_id, rq_depth, qp_msix_idx); + if (err) { + dev_err(hwdev->dev, "Failed to create rq, qid: %u\n", + q_id); + goto err_destroy_sq_wq; + } + + return 0; + +err_destroy_sq_wq: + hinic3_wq_destroy(hwdev, &sq->wq); + + return err; +} + +static void hinic3_destroy_qp(struct hinic3_hwdev *hwdev, + struct hinic3_io_queue *sq, + struct hinic3_io_queue *rq) +{ + hinic3_wq_destroy(hwdev, &sq->wq); + hinic3_wq_destroy(hwdev, &rq->wq); +} + +int hinic3_alloc_qps(struct hinic3_nic_dev *nic_dev, + struct hinic3_dyna_qp_params *qp_params) +{ + struct msix_entry *qps_msix_entries = nic_dev->qps_msix_entries; + struct hinic3_nic_io *nic_io = nic_dev->nic_io; + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + struct hinic3_io_queue *sqs; + struct hinic3_io_queue *rqs; + u16 q_id; + int err; + + if (qp_params->num_qps > nic_io->max_qps || !qp_params->num_qps) + return -EINVAL; + + sqs = kcalloc(qp_params->num_qps, sizeof(*sqs), GFP_KERNEL); + if (!sqs) { + err = -ENOMEM; + goto err_out; + } + + rqs = kcalloc(qp_params->num_qps, sizeof(*rqs), GFP_KERNEL); + if (!rqs) { + err = -ENOMEM; + goto err_free_sqs; + } + + for (q_id = 0; q_id < qp_params->num_qps; q_id++) { + err = hinic3_create_qp(hwdev, &sqs[q_id], &rqs[q_id], q_id, + qp_params->sq_depth, qp_params->rq_depth, + qps_msix_entries[q_id].entry); + if (err) { + dev_err(hwdev->dev, "Failed to allocate qp %u, err: %d\n", + q_id, err); + goto err_destroy_qp; + } + } + + qp_params->sqs = sqs; + qp_params->rqs = rqs; + + return 0; + +err_destroy_qp: + while (q_id > 0) { + q_id--; + hinic3_destroy_qp(hwdev, &sqs[q_id], &rqs[q_id]); + } + kfree(rqs); +err_free_sqs: + kfree(sqs); +err_out: + return err; +} + +void hinic3_free_qps(struct hinic3_nic_dev *nic_dev, + struct hinic3_dyna_qp_params *qp_params) +{ + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + u16 q_id; + + for (q_id = 0; q_id < qp_params->num_qps; q_id++) + hinic3_destroy_qp(hwdev, &qp_params->sqs[q_id], + &qp_params->rqs[q_id]); + + kfree(qp_params->sqs); + kfree(qp_params->rqs); +} + +void hinic3_init_qps(struct hinic3_nic_dev *nic_dev, + struct hinic3_dyna_qp_params *qp_params) +{ + struct hinic3_nic_io *nic_io = nic_dev->nic_io; + struct hinic3_io_queue *sqs = qp_params->sqs; + struct hinic3_io_queue *rqs = qp_params->rqs; + u16 q_id; + + nic_io->num_qps = qp_params->num_qps; + nic_io->sq = qp_params->sqs; + nic_io->rq = qp_params->rqs; + for (q_id = 0; q_id < nic_io->num_qps; q_id++) { + sqs[q_id].cons_idx_addr = + (u16 *)HINIC3_CI_VADDR(nic_io->ci_vaddr_base, q_id); + /* clear ci value */ + WRITE_ONCE(*sqs[q_id].cons_idx_addr, 0); + + sqs[q_id].db_addr = nic_io->sqs_db_addr; + rqs[q_id].db_addr = nic_io->rqs_db_addr; + } +} + +void hinic3_uninit_qps(struct hinic3_nic_dev *nic_dev, + struct hinic3_dyna_qp_params *qp_params) +{ + struct hinic3_nic_io *nic_io = nic_dev->nic_io; + + qp_params->sqs = nic_io->sq; + qp_params->rqs = nic_io->rq; + qp_params->num_qps = nic_io->num_qps; +} + +static void hinic3_qp_prepare_cmdq_header(struct hinic3_qp_ctxt_hdr *qp_ctxt_hdr, + enum 
hinic3_qp_ctxt_type ctxt_type, + u16 num_queues, u16 q_id) +{ + qp_ctxt_hdr->queue_type = cpu_to_le16(ctxt_type); + qp_ctxt_hdr->num_queues = cpu_to_le16(num_queues); + qp_ctxt_hdr->start_qid = cpu_to_le16(q_id); + qp_ctxt_hdr->rsvd = 0; +} + +static void hinic3_sq_prepare_ctxt(struct hinic3_io_queue *sq, u16 sq_id, + struct hinic3_sq_ctxt *sq_ctxt) +{ + u64 wq_page_addr, wq_page_pfn, wq_block_pfn; + u32 wq_block_pfn_hi, wq_block_pfn_lo; + u32 wq_page_pfn_hi, wq_page_pfn_lo; + u16 pi_start, ci_start; + + ci_start = hinic3_get_sq_local_ci(sq); + pi_start = hinic3_get_sq_local_pi(sq); + + wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&sq->wq); + + wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); + wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + wq_page_pfn_lo = lower_32_bits(wq_page_pfn); + + wq_block_pfn = WQ_BLOCK_PFN(sq->wq.wq_block_paddr); + wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + wq_block_pfn_lo = lower_32_bits(wq_block_pfn); + + sq_ctxt->ci_pi = + cpu_to_le32(SQ_CTXT_CI_PI_SET(ci_start, CI_IDX) | + SQ_CTXT_CI_PI_SET(pi_start, PI_IDX)); + + sq_ctxt->drop_mode_sp = + cpu_to_le32(SQ_CTXT_MODE_SET(0, SP_FLAG) | + SQ_CTXT_MODE_SET(0, PKT_DROP)); + + sq_ctxt->wq_pfn_hi_owner = + cpu_to_le32(SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | + SQ_CTXT_WQ_PAGE_SET(1, OWNER)); + + sq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo); + + sq_ctxt->pkt_drop_thd = + cpu_to_le32(SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_ON, THD_ON) | + SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_OFF, THD_OFF)); + + sq_ctxt->global_sq_id = + cpu_to_le32(SQ_CTXT_GLOBAL_QUEUE_ID_SET((u32)sq_id, + GLOBAL_SQ_ID)); + + /* enable insert c-vlan by default */ + sq_ctxt->vlan_ceq_attr = + cpu_to_le32(SQ_CTXT_VLAN_CEQ_SET(0, CEQ_EN) | + SQ_CTXT_VLAN_CEQ_SET(1, INSERT_MODE)); + + sq_ctxt->rsvd0 = 0; + + sq_ctxt->pref_cache = + cpu_to_le32(SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MIN, CACHE_MIN) | + SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MAX, CACHE_MAX) | + SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD)); + + sq_ctxt->pref_ci_owner = + cpu_to_le32(SQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) | + SQ_CTXT_PREF_SET(1, OWNER)); + + sq_ctxt->pref_wq_pfn_hi_ci = + cpu_to_le32(SQ_CTXT_PREF_SET(ci_start, CI_LOW) | + SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI)); + + sq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo); + + sq_ctxt->wq_block_pfn_hi = + cpu_to_le32(SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI)); + + sq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo); +} + +static void hinic3_rq_prepare_ctxt_get_wq_info(struct hinic3_io_queue *rq, + u32 *wq_page_pfn_hi, + u32 *wq_page_pfn_lo, + u32 *wq_block_pfn_hi, + u32 *wq_block_pfn_lo) +{ + u64 wq_page_addr, wq_page_pfn, wq_block_pfn; + + wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&rq->wq); + + wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); + *wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + *wq_page_pfn_lo = lower_32_bits(wq_page_pfn); + + wq_block_pfn = WQ_BLOCK_PFN(rq->wq.wq_block_paddr); + *wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + *wq_block_pfn_lo = lower_32_bits(wq_block_pfn); +} + +static void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq, + struct hinic3_rq_ctxt *rq_ctxt) +{ + u32 wq_block_pfn_hi, wq_block_pfn_lo; + u32 wq_page_pfn_hi, wq_page_pfn_lo; + u16 pi_start, ci_start; + + ci_start = (rq->wq.cons_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE; + pi_start = (rq->wq.prod_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE; + + hinic3_rq_prepare_ctxt_get_wq_info(rq, &wq_page_pfn_hi, &wq_page_pfn_lo, + &wq_block_pfn_hi, &wq_block_pfn_lo); + + 
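/* Descriptive note (not in the original patch): the assignments below pack the software queue state into the hardware RQ context layout - the pre-scaled CI/PI pair, the WQE-page and WQ-block PFNs split into hi/lo words, and the prefetch hints the chip uses when walking the queue. */ + 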
rq_ctxt->ci_pi = + cpu_to_le32(RQ_CTXT_CI_PI_SET(ci_start, CI_IDX) | + RQ_CTXT_CI_PI_SET(pi_start, PI_IDX)); + + rq_ctxt->ceq_attr = + cpu_to_le32(RQ_CTXT_CEQ_ATTR_SET(0, EN) | + RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR)); + + rq_ctxt->wq_pfn_hi_type_owner = + cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | + RQ_CTXT_WQ_PAGE_SET(1, OWNER)); + + /* use 16Byte WQE */ + rq_ctxt->wq_pfn_hi_type_owner |= + cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(2, WQE_TYPE)); + rq_ctxt->cqe_sge_len = cpu_to_le32(RQ_CTXT_CQE_LEN_SET(1, CQE_LEN)); + + rq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo); + + rq_ctxt->pref_cache = + cpu_to_le32(RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MIN, CACHE_MIN) | + RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MAX, CACHE_MAX) | + RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD)); + + rq_ctxt->pref_ci_owner = + cpu_to_le32(RQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) | + RQ_CTXT_PREF_SET(1, OWNER)); + + rq_ctxt->pref_wq_pfn_hi_ci = + cpu_to_le32(RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) | + RQ_CTXT_PREF_SET(ci_start, CI_LOW)); + + rq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo); + + rq_ctxt->wq_block_pfn_hi = + cpu_to_le32(RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI)); + + rq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo); +} + +static int init_sq_ctxts(struct hinic3_nic_dev *nic_dev) +{ + struct hinic3_nic_io *nic_io = nic_dev->nic_io; + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + struct hinic3_sq_ctxt_block *sq_ctxt_block; + u16 q_id, curr_id, max_ctxts, i; + struct hinic3_sq_ctxt *sq_ctxt; + struct hinic3_cmd_buf *cmd_buf; + struct hinic3_io_queue *sq; + __le64 out_param; + int err = 0; + + cmd_buf = hinic3_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + dev_err(hwdev->dev, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + q_id = 0; + while (q_id < nic_io->num_qps) { + sq_ctxt_block = cmd_buf->buf; + sq_ctxt = sq_ctxt_block->sq_ctxt; + + max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ? + HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id); + + hinic3_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr, + HINIC3_QP_CTXT_TYPE_SQ, max_ctxts, + q_id); + + for (i = 0; i < max_ctxts; i++) { + curr_id = q_id + i; + sq = &nic_io->sq[curr_id]; + hinic3_sq_prepare_ctxt(sq, curr_id, &sq_ctxt[i]); + } + + hinic3_cmdq_buf_swab32(sq_ctxt_block, sizeof(*sq_ctxt_block)); + + cmd_buf->size = cpu_to_le16(SQ_CTXT_SIZE(max_ctxts)); + err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC, + L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX, + cmd_buf, &out_param); + if (err || out_param) { + dev_err(hwdev->dev, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n", + err, out_param); + err = -EFAULT; + break; + } + + q_id += max_ctxts; + } + + hinic3_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +static int init_rq_ctxts(struct hinic3_nic_dev *nic_dev) +{ + struct hinic3_nic_io *nic_io = nic_dev->nic_io; + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + struct hinic3_rq_ctxt_block *rq_ctxt_block; + u16 q_id, curr_id, max_ctxts, i; + struct hinic3_rq_ctxt *rq_ctxt; + struct hinic3_cmd_buf *cmd_buf; + struct hinic3_io_queue *rq; + __le64 out_param; + int err = 0; + + cmd_buf = hinic3_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + dev_err(hwdev->dev, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + q_id = 0; + while (q_id < nic_io->num_qps) { + rq_ctxt_block = cmd_buf->buf; + rq_ctxt = rq_ctxt_block->rq_ctxt; + + max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ? 
+ HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id); + + hinic3_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr, + HINIC3_QP_CTXT_TYPE_RQ, max_ctxts, + q_id); + + for (i = 0; i < max_ctxts; i++) { + curr_id = q_id + i; + rq = &nic_io->rq[curr_id]; + hinic3_rq_prepare_ctxt(rq, &rq_ctxt[i]); + } + + hinic3_cmdq_buf_swab32(rq_ctxt_block, sizeof(*rq_ctxt_block)); + + cmd_buf->size = cpu_to_le16(RQ_CTXT_SIZE(max_ctxts)); + + err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC, + L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX, + cmd_buf, &out_param); + if (err || out_param) { + dev_err(hwdev->dev, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n", + err, out_param); + err = -EFAULT; + break; + } + + q_id += max_ctxts; + } + + hinic3_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +static int init_qp_ctxts(struct hinic3_nic_dev *nic_dev) +{ + int err; + + err = init_sq_ctxts(nic_dev); + if (err) + return err; + + err = init_rq_ctxts(nic_dev); + if (err) + return err; + + return 0; +} + +static int clean_queue_offload_ctxt(struct hinic3_nic_dev *nic_dev, + enum hinic3_qp_ctxt_type ctxt_type) +{ + struct hinic3_nic_io *nic_io = nic_dev->nic_io; + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + struct hinic3_clean_queue_ctxt *ctxt_block; + struct hinic3_cmd_buf *cmd_buf; + __le64 out_param; + int err; + + cmd_buf = hinic3_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + dev_err(hwdev->dev, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + ctxt_block = cmd_buf->buf; + ctxt_block->cmdq_hdr.num_queues = cpu_to_le16(nic_io->max_qps); + ctxt_block->cmdq_hdr.queue_type = cpu_to_le16(ctxt_type); + ctxt_block->cmdq_hdr.start_qid = 0; + ctxt_block->cmdq_hdr.rsvd = 0; + ctxt_block->rsvd = 0; + + hinic3_cmdq_buf_swab32(ctxt_block, sizeof(*ctxt_block)); + + cmd_buf->size = cpu_to_le16(sizeof(*ctxt_block)); + + err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC, + L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX, + cmd_buf, &out_param); + if (err || out_param) { + dev_err(hwdev->dev, "Failed to clean queue offload ctxts, err: %d, out_param: 0x%llx\n", + err, out_param); + + err = -EFAULT; + } + + hinic3_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +static int clean_qp_offload_ctxt(struct hinic3_nic_dev *nic_dev) +{ + /* clean LRO/TSO context space */ + return clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_SQ) || + clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_RQ); +} + +/* Init QP ctxts, set the SQ CI attributes and arm all SQs */ +int hinic3_init_qp_ctxts(struct hinic3_nic_dev *nic_dev) +{ + struct hinic3_nic_io *nic_io = nic_dev->nic_io; + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + struct hinic3_sq_attr sq_attr; + u32 rq_depth; + u16 q_id; + int err; + + err = init_qp_ctxts(nic_dev); + if (err) { + dev_err(hwdev->dev, "Failed to init QP ctxts\n"); + return err; + } + + /* clean LRO/TSO context space */ + err = clean_qp_offload_ctxt(nic_dev); + if (err) { + dev_err(hwdev->dev, "Failed to clean qp offload ctxts\n"); + return err; + } + + rq_depth = nic_io->rq[0].wq.q_depth << HINIC3_NORMAL_RQ_WQE; + + err = hinic3_set_root_ctxt(hwdev, rq_depth, nic_io->sq[0].wq.q_depth, + nic_io->rx_buf_len); + if (err) { + dev_err(hwdev->dev, "Failed to set root context\n"); + return err; + } + + for (q_id = 0; q_id < nic_io->num_qps; q_id++) { + sq_attr.ci_dma_base = + HINIC3_CI_PADDR(nic_io->ci_dma_base, q_id) >> 0x2; + sq_attr.pending_limit = HINIC3_DEFAULT_TX_CI_PENDING_LIMIT; + sq_attr.coalescing_time = HINIC3_DEFAULT_TX_CI_COALESCING_TIME; + sq_attr.intr_en = 1; + sq_attr.intr_idx = nic_io->sq[q_id].msix_entry_idx; + 
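/* Descriptive note (not in the original patch): the remaining attributes bind this CI-table entry to its queue - l2nic_sqn names the logical SQ, and dma_attr_off 0 selects the default DMA attribute entry; field semantics are inferred from the names. */ + 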
sq_attr.l2nic_sqn = q_id; + sq_attr.dma_attr_off = 0; + err = hinic3_set_ci_table(hwdev, &sq_attr); + if (err) { + dev_err(hwdev->dev, "Failed to set ci table\n"); + goto err_clean_root_ctxt; + } + } + + return 0; + +err_clean_root_ctxt: + hinic3_clean_root_ctxt(hwdev); + + return err; +} + +void hinic3_free_qp_ctxts(struct hinic3_nic_dev *nic_dev) +{ + hinic3_clean_root_ctxt(nic_dev->hwdev); } diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h index 865ba6878c48..12eefabcf1db 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h @@ -75,8 +75,8 @@ static inline u16 hinic3_get_sq_hw_ci(const struct hinic3_io_queue *sq) #define DB_CFLAG_DP_RQ 1 struct hinic3_nic_db { - u32 db_info; - u32 pi_hi; + __le32 db_info; + __le32 pi_hi; }; static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos, @@ -84,15 +84,25 @@ static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos, { struct hinic3_nic_db db; - db.db_info = DB_INFO_SET(DB_SRC_TYPE, TYPE) | - DB_INFO_SET(cflag, CFLAG) | - DB_INFO_SET(cos, COS) | - DB_INFO_SET(queue->q_id, QID); - db.pi_hi = DB_PI_HIGH(pi); + db.db_info = + cpu_to_le32(DB_INFO_SET(DB_SRC_TYPE, TYPE) | + DB_INFO_SET(cflag, CFLAG) | + DB_INFO_SET(cos, COS) | + DB_INFO_SET(queue->q_id, QID)); + db.pi_hi = cpu_to_le32(DB_PI_HIGH(pi)); writeq(*((u64 *)&db), DB_ADDR(queue, pi)); } +struct hinic3_dyna_qp_params { + u16 num_qps; + u32 sq_depth; + u32 rq_depth; + + struct hinic3_io_queue *sqs; + struct hinic3_io_queue *rqs; +}; + struct hinic3_nic_io { struct hinic3_io_queue *sq; struct hinic3_io_queue *rq; @@ -117,4 +127,19 @@ struct hinic3_nic_io { int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev); void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev); +int hinic3_init_nicio_res(struct hinic3_nic_dev *nic_dev); +void hinic3_free_nicio_res(struct hinic3_nic_dev *nic_dev); + +int hinic3_alloc_qps(struct hinic3_nic_dev *nic_dev, + struct hinic3_dyna_qp_params *qp_params); +void hinic3_free_qps(struct hinic3_nic_dev *nic_dev, + struct hinic3_dyna_qp_params *qp_params); +void hinic3_init_qps(struct hinic3_nic_dev *nic_dev, + struct hinic3_dyna_qp_params *qp_params); +void hinic3_uninit_qps(struct hinic3_nic_dev *nic_dev, + struct hinic3_dyna_qp_params *qp_params); + +int hinic3_init_qp_ctxts(struct hinic3_nic_dev *nic_dev); +void hinic3_free_qp_ctxts(struct hinic3_nic_dev *nic_dev); + #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h b/drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h new file mode 100644 index 000000000000..86c88d0bb4bd --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_PCI_ID_TBL_H_ +#define _HINIC3_PCI_ID_TBL_H_ + +#define PCI_DEV_ID_HINIC3_VF 0x375F + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c new file mode 100644 index 000000000000..4ff1b2f79838 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c @@ -0,0 +1,336 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ +#include <linux/ethtool.h> + +#include "hinic3_cmdq.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_rss.h" + +static void hinic3_fillout_indir_tbl(struct net_device *netdev, u16 *indir) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u16 i, num_qps; + + num_qps = nic_dev->q_params.num_qps; + for (i = 0; i < L2NIC_RSS_INDIR_SIZE; i++) + indir[i] = ethtool_rxfh_indir_default(i, num_qps); +} + +static int hinic3_rss_cfg(struct hinic3_hwdev *hwdev, u8 rss_en, u16 num_qps) +{ + struct mgmt_msg_params msg_params = {}; + struct l2nic_cmd_cfg_rss rss_cfg = {}; + int err; + + rss_cfg.func_id = hinic3_global_func_id(hwdev); + rss_cfg.rss_en = rss_en; + rss_cfg.rq_priority_number = 0; + rss_cfg.num_qps = num_qps; + + mgmt_msg_params_init_default(&msg_params, &rss_cfg, sizeof(rss_cfg)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_CFG_RSS, &msg_params); + if (err || rss_cfg.msg_head.status) { + dev_err(hwdev->dev, "Failed to set rss cfg, err: %d, status: 0x%x\n", + err, rss_cfg.msg_head.status); + return -EINVAL; + } + + return 0; +} + +static void hinic3_init_rss_parameters(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + nic_dev->rss_hash_type = HINIC3_RSS_HASH_ENGINE_TYPE_XOR; + nic_dev->rss_type.tcp_ipv6_ext = 1; + nic_dev->rss_type.ipv6_ext = 1; + nic_dev->rss_type.tcp_ipv6 = 1; + nic_dev->rss_type.ipv6 = 1; + nic_dev->rss_type.tcp_ipv4 = 1; + nic_dev->rss_type.ipv4 = 1; + nic_dev->rss_type.udp_ipv6 = 1; + nic_dev->rss_type.udp_ipv4 = 1; +} + +static void decide_num_qps(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + unsigned int dev_cpus; + + dev_cpus = netif_get_num_default_rss_queues(); + nic_dev->q_params.num_qps = min(dev_cpus, nic_dev->max_qps); +} + +static int alloc_rss_resource(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + nic_dev->rss_hkey = kmalloc_array(L2NIC_RSS_KEY_SIZE, + sizeof(nic_dev->rss_hkey[0]), + GFP_KERNEL); + if (!nic_dev->rss_hkey) + return -ENOMEM; + + netdev_rss_key_fill(nic_dev->rss_hkey, L2NIC_RSS_KEY_SIZE); + + nic_dev->rss_indir = kcalloc(L2NIC_RSS_INDIR_SIZE, sizeof(u16), + GFP_KERNEL); + if (!nic_dev->rss_indir) { + kfree(nic_dev->rss_hkey); + nic_dev->rss_hkey = NULL; + return -ENOMEM; + } + + return 0; +} + +static int hinic3_rss_set_indir_tbl(struct hinic3_hwdev *hwdev, + const u16 *indir_table) +{ + struct l2nic_cmd_rss_set_indir_tbl *indir_tbl; + struct hinic3_cmd_buf *cmd_buf; + __le64 out_param; + int err; + u32 i; + + cmd_buf = hinic3_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + dev_err(hwdev->dev, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + cmd_buf->size = cpu_to_le16(sizeof(struct l2nic_cmd_rss_set_indir_tbl)); + indir_tbl = cmd_buf->buf; + memset(indir_tbl, 0, sizeof(*indir_tbl)); + + for (i = 0; i < L2NIC_RSS_INDIR_SIZE; i++) + indir_tbl->entry[i] = cpu_to_le16(indir_table[i]); + + hinic3_cmdq_buf_swab32(indir_tbl, sizeof(*indir_tbl)); + + err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC, + L2NIC_UCODE_CMD_SET_RSS_INDIR_TBL, + cmd_buf, &out_param); + if (err || out_param) { + dev_err(hwdev->dev, "Failed to set rss indir table\n"); + err = -EFAULT; + } + + hinic3_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +static int hinic3_set_rss_type(struct hinic3_hwdev *hwdev, + struct hinic3_rss_type rss_type) +{ + struct l2nic_cmd_set_rss_ctx_tbl ctx_tbl = {}; + struct 
mgmt_msg_params msg_params = {}; + u32 ctx; + int err; + + ctx_tbl.func_id = hinic3_global_func_id(hwdev); + ctx = L2NIC_RSS_TYPE_SET(1, VALID) | + L2NIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) | + L2NIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) | + L2NIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) | + L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) | + L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) | + L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) | + L2NIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) | + L2NIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6); + ctx_tbl.context = ctx; + + mgmt_msg_params_init_default(&msg_params, &ctx_tbl, sizeof(ctx_tbl)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_SET_RSS_CTX_TBL, &msg_params); + + if (ctx_tbl.msg_head.status == MGMT_STATUS_CMD_UNSUPPORTED) { + return MGMT_STATUS_CMD_UNSUPPORTED; + } else if (err || ctx_tbl.msg_head.status) { + dev_err(hwdev->dev, "mgmt Failed to set rss context offload, err: %d, status: 0x%x\n", + err, ctx_tbl.msg_head.status); + return -EINVAL; + } + + return 0; +} + +static int hinic3_rss_cfg_hash_type(struct hinic3_hwdev *hwdev, u8 opcode, + enum hinic3_rss_hash_type *type) +{ + struct l2nic_cmd_cfg_rss_engine hash_type_cmd = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + hash_type_cmd.func_id = hinic3_global_func_id(hwdev); + hash_type_cmd.opcode = opcode; + + if (opcode == MGMT_MSG_CMD_OP_SET) + hash_type_cmd.hash_engine = *type; + + mgmt_msg_params_init_default(&msg_params, &hash_type_cmd, + sizeof(hash_type_cmd)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_CFG_RSS_HASH_ENGINE, + &msg_params); + if (err || hash_type_cmd.msg_head.status) { + dev_err(hwdev->dev, "Failed to %s hash engine, err: %d, status: 0x%x\n", + opcode == MGMT_MSG_CMD_OP_SET ? "set" : "get", + err, hash_type_cmd.msg_head.status); + return -EIO; + } + + if (opcode == MGMT_MSG_CMD_OP_GET) + *type = hash_type_cmd.hash_engine; + + return 0; +} + +static int hinic3_rss_set_hash_type(struct hinic3_hwdev *hwdev, + enum hinic3_rss_hash_type type) +{ + return hinic3_rss_cfg_hash_type(hwdev, MGMT_MSG_CMD_OP_SET, &type); +} + +static int hinic3_config_rss_hw_resource(struct net_device *netdev, + u16 *indir_tbl) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + err = hinic3_rss_set_indir_tbl(nic_dev->hwdev, indir_tbl); + if (err) + return err; + + err = hinic3_set_rss_type(nic_dev->hwdev, nic_dev->rss_type); + if (err) + return err; + + return hinic3_rss_set_hash_type(nic_dev->hwdev, nic_dev->rss_hash_type); +} + +static int hinic3_rss_cfg_hash_key(struct hinic3_hwdev *hwdev, u8 opcode, + u8 *key) +{ + struct l2nic_cmd_cfg_rss_hash_key hash_key = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + hash_key.func_id = hinic3_global_func_id(hwdev); + hash_key.opcode = opcode; + + if (opcode == MGMT_MSG_CMD_OP_SET) + memcpy(hash_key.key, key, L2NIC_RSS_KEY_SIZE); + + mgmt_msg_params_init_default(&msg_params, &hash_key, sizeof(hash_key)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_CFG_RSS_HASH_KEY, &msg_params); + if (err || hash_key.msg_head.status) { + dev_err(hwdev->dev, "Failed to %s hash key, err: %d, status: 0x%x\n", + opcode == MGMT_MSG_CMD_OP_SET ? 
"set" : "get", + err, hash_key.msg_head.status); + return -EINVAL; + } + + if (opcode == MGMT_MSG_CMD_OP_GET) + memcpy(key, hash_key.key, L2NIC_RSS_KEY_SIZE); + + return 0; +} + +static int hinic3_rss_set_hash_key(struct hinic3_hwdev *hwdev, u8 *key) +{ + return hinic3_rss_cfg_hash_key(hwdev, MGMT_MSG_CMD_OP_SET, key); +} + +static int hinic3_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + err = hinic3_rss_set_hash_key(nic_dev->hwdev, nic_dev->rss_hkey); + if (err) + return err; + + hinic3_fillout_indir_tbl(netdev, nic_dev->rss_indir); + + err = hinic3_config_rss_hw_resource(netdev, nic_dev->rss_indir); + if (err) + return err; + + err = hinic3_rss_cfg(nic_dev->hwdev, rss_en, nic_dev->q_params.num_qps); + if (err) + return err; + + return 0; +} + +int hinic3_rss_init(struct net_device *netdev) +{ + return hinic3_set_hw_rss_parameters(netdev, 1); +} + +void hinic3_rss_uninit(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + hinic3_rss_cfg(nic_dev->hwdev, 0, 0); +} + +void hinic3_clear_rss_config(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + kfree(nic_dev->rss_hkey); + nic_dev->rss_hkey = NULL; + + kfree(nic_dev->rss_indir); + nic_dev->rss_indir = NULL; +} + +void hinic3_try_to_enable_rss(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + int err; + + nic_dev->max_qps = hinic3_func_max_qnum(hwdev); + if (nic_dev->max_qps <= 1 || + !hinic3_test_support(nic_dev, HINIC3_NIC_F_RSS)) + goto err_reset_q_params; + + err = alloc_rss_resource(netdev); + if (err) { + nic_dev->max_qps = 1; + goto err_reset_q_params; + } + + set_bit(HINIC3_RSS_ENABLE, &nic_dev->flags); + decide_num_qps(netdev); + hinic3_init_rss_parameters(netdev); + err = hinic3_set_hw_rss_parameters(netdev, 0); + if (err) { + dev_err(hwdev->dev, "Failed to set hardware rss parameters\n"); + hinic3_clear_rss_config(netdev); + nic_dev->max_qps = 1; + goto err_reset_q_params; + } + + return; + +err_reset_q_params: + clear_bit(HINIC3_RSS_ENABLE, &nic_dev->flags); + nic_dev->q_params.num_qps = nic_dev->max_qps; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h new file mode 100644 index 000000000000..78d82c2aca06 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_RSS_H_ +#define _HINIC3_RSS_H_ + +#include <linux/netdevice.h> + +int hinic3_rss_init(struct net_device *netdev); +void hinic3_rss_uninit(struct net_device *netdev); +void hinic3_try_to_enable_rss(struct net_device *netdev); +void hinic3_clear_rss_config(struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c index 860163e9d66c..16c00c3bb1ed 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c @@ -35,13 +35,35 @@ int hinic3_alloc_rxqs(struct net_device *netdev) { - /* Completed by later submission due to LoC limit. 
*/ - return -EFAULT; + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + u16 num_rxqs = nic_dev->max_qps; + struct hinic3_rxq *rxq; + u16 q_id; + + nic_dev->rxqs = kcalloc(num_rxqs, sizeof(*nic_dev->rxqs), GFP_KERNEL); + if (!nic_dev->rxqs) + return -ENOMEM; + + for (q_id = 0; q_id < num_rxqs; q_id++) { + rxq = &nic_dev->rxqs[q_id]; + rxq->netdev = netdev; + rxq->dev = &pdev->dev; + rxq->q_id = q_id; + rxq->buf_len = nic_dev->rx_buf_len; + rxq->buf_len_shift = ilog2(nic_dev->rx_buf_len); + rxq->q_depth = nic_dev->q_params.rq_depth; + rxq->q_mask = nic_dev->q_params.rq_depth - 1; + } + + return 0; } void hinic3_free_rxqs(struct net_device *netdev) { - /* Completed by later submission due to LoC limit. */ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + kfree(nic_dev->rxqs); } static int rx_alloc_mapped_page(struct page_pool *page_pool, @@ -50,6 +72,9 @@ static int rx_alloc_mapped_page(struct page_pool *page_pool, struct page *page; u32 page_offset; + if (likely(rx_info->page)) + return 0; + page = page_pool_dev_alloc_frag(page_pool, &page_offset, buf_len); if (unlikely(!page)) return -ENOMEM; @@ -60,14 +85,35 @@ static int rx_alloc_mapped_page(struct page_pool *page_pool, return 0; } +/* Associate a fixed completion element with every WQE in the RQ. Every RQ WQE + * will always post its completion to the same place. + */ +static void rq_associate_cqes(struct hinic3_rxq *rxq) +{ + struct hinic3_queue_pages *qpages; + struct hinic3_rq_wqe *rq_wqe; + dma_addr_t cqe_dma; + u32 i; + + qpages = &rxq->rq->wq.qpages; + + for (i = 0; i < rxq->q_depth; i++) { + rq_wqe = get_q_element(qpages, i, NULL); + cqe_dma = rxq->cqe_start_paddr + + i * sizeof(struct hinic3_rq_cqe); + rq_wqe->cqe_hi_addr = cpu_to_le32(upper_32_bits(cqe_dma)); + rq_wqe->cqe_lo_addr = cpu_to_le32(lower_32_bits(cqe_dma)); + } +} + static void rq_wqe_buf_set(struct hinic3_io_queue *rq, uint32_t wqe_idx, dma_addr_t dma_addr, u16 len) { struct hinic3_rq_wqe *rq_wqe; rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL); - rq_wqe->buf_hi_addr = upper_32_bits(dma_addr); - rq_wqe->buf_lo_addr = lower_32_bits(dma_addr); + rq_wqe->buf_hi_addr = cpu_to_le32(upper_32_bits(dma_addr)); + rq_wqe->buf_lo_addr = cpu_to_le32(lower_32_bits(dma_addr)); } static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq) @@ -102,6 +148,41 @@ static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq) return i; } +static u32 hinic3_alloc_rx_buffers(struct hinic3_dyna_rxq_res *rqres, + u32 rq_depth, u16 buf_len) +{ + u32 free_wqebbs = rq_depth - 1; + u32 idx; + int err; + + for (idx = 0; idx < free_wqebbs; idx++) { + err = rx_alloc_mapped_page(rqres->page_pool, + &rqres->rx_info[idx], buf_len); + if (err) + break; + } + + return idx; +} + +static void hinic3_free_rx_buffers(struct hinic3_dyna_rxq_res *rqres, + u32 q_depth) +{ + struct hinic3_rx_info *rx_info; + u32 i; + + /* Free all pages posted to the Rx ring */ + for (i = 0; i < q_depth; i++) { + rx_info = &rqres->rx_info[i]; + + if (rx_info->page) { + page_pool_put_full_page(rqres->page_pool, + rx_info->page, false); + rx_info->page = NULL; + } + } +} + static void hinic3_add_rx_frag(struct hinic3_rxq *rxq, struct hinic3_rx_info *rx_info, struct sk_buff *skb, u32 size) @@ -279,7 +360,7 @@ static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe, if (skb_is_nonlinear(skb)) hinic3_pull_tail(skb); - offload_type = rx_cqe->offload_type; + offload_type = le32_to_cpu(rx_cqe->offload_type); hinic3_rx_csum(rxq, offload_type, status, skb); num_lro = 
RQ_CQE_STATUS_GET(status, NUM_LRO); @@ -299,6 +380,135 @@ static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe, return 0; } +int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq, + u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res) +{ + u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth; + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct page_pool_params pp_params = {}; + struct hinic3_dyna_rxq_res *rqres; + u32 pkt_idx; + int idx; + + for (idx = 0; idx < num_rq; idx++) { + rqres = &rxqs_res[idx]; + rqres->rx_info = kcalloc(rq_depth, sizeof(*rqres->rx_info), + GFP_KERNEL); + if (!rqres->rx_info) + goto err_free_rqres; + + rqres->cqe_start_vaddr = + dma_alloc_coherent(&nic_dev->pdev->dev, cqe_mem_size, + &rqres->cqe_start_paddr, GFP_KERNEL); + if (!rqres->cqe_start_vaddr) { + netdev_err(netdev, "Failed to alloc rxq%d rx cqe\n", + idx); + goto err_free_rx_info; + } + + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp_params.pool_size = rq_depth * nic_dev->rx_buf_len / + PAGE_SIZE; + pp_params.nid = dev_to_node(&nic_dev->pdev->dev); + pp_params.dev = &nic_dev->pdev->dev; + pp_params.dma_dir = DMA_FROM_DEVICE; + pp_params.max_len = PAGE_SIZE; + rqres->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rqres->page_pool)) { + netdev_err(netdev, "Failed to create rxq%d page pool\n", + idx); + goto err_free_cqe; + } + + pkt_idx = hinic3_alloc_rx_buffers(rqres, rq_depth, + nic_dev->rx_buf_len); + if (!pkt_idx) { + netdev_err(netdev, "Failed to alloc rxq%d rx buffers\n", + idx); + goto err_destroy_page_pool; + } + rqres->next_to_alloc = pkt_idx; + } + + return 0; + +err_destroy_page_pool: + page_pool_destroy(rqres->page_pool); +err_free_cqe: + dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, + rqres->cqe_start_vaddr, + rqres->cqe_start_paddr); +err_free_rx_info: + kfree(rqres->rx_info); +err_free_rqres: + hinic3_free_rxqs_res(netdev, idx, rq_depth, rxqs_res); + + return -ENOMEM; +} + +void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq, + u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res) +{ + u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth; + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_dyna_rxq_res *rqres; + int idx; + + for (idx = 0; idx < num_rq; idx++) { + rqres = &rxqs_res[idx]; + + hinic3_free_rx_buffers(rqres, rq_depth); + page_pool_destroy(rqres->page_pool); + dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, + rqres->cqe_start_vaddr, + rqres->cqe_start_paddr); + kfree(rqres->rx_info); + } +} + +int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq, + u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_dyna_rxq_res *rqres; + struct msix_entry *msix_entry; + struct hinic3_rxq *rxq; + u16 q_id; + u32 pkts; + + for (q_id = 0; q_id < num_rq; q_id++) { + rxq = &nic_dev->rxqs[q_id]; + rqres = &rxqs_res[q_id]; + msix_entry = &nic_dev->qps_msix_entries[q_id]; + + rxq->irq_id = msix_entry->vector; + rxq->msix_entry_idx = msix_entry->entry; + rxq->next_to_update = 0; + rxq->next_to_alloc = rqres->next_to_alloc; + rxq->q_depth = rq_depth; + rxq->delta = rxq->q_depth; + rxq->q_mask = rxq->q_depth - 1; + rxq->cons_idx = 0; + + rxq->cqe_arr = rqres->cqe_start_vaddr; + rxq->cqe_start_paddr = rqres->cqe_start_paddr; + rxq->rx_info = rqres->rx_info; + rxq->page_pool = rqres->page_pool; + + rxq->rq = &nic_dev->nic_io->rq[rxq->q_id]; + + rq_associate_cqes(rxq); + + pkts = 
hinic3_rx_fill_buffers(rxq); + if (!pkts) { + netdev_err(netdev, "Failed to fill Rx buffer\n"); + return -ENOMEM; + } + } + + return 0; +} + int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget) { struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev); @@ -311,14 +521,14 @@ int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget) while (likely(nr_pkts < budget)) { sw_ci = rxq->cons_idx & rxq->q_mask; rx_cqe = rxq->cqe_arr + sw_ci; - status = rx_cqe->status; + status = le32_to_cpu(rx_cqe->status); if (!RQ_CQE_STATUS_GET(status, RXDONE)) break; /* make sure we read rx_done before packet length */ rmb(); - vlan_len = rx_cqe->vlan_len; + vlan_len = le32_to_cpu(rx_cqe->vlan_len); pkt_len = RQ_CQE_SGE_GET(vlan_len, LEN); if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status)) break; diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h index 1cca21858d40..44ae841a3648 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h @@ -27,21 +27,21 @@ /* RX Completion information that is provided by HW for a specific RX WQE */ struct hinic3_rq_cqe { - u32 status; - u32 vlan_len; - u32 offload_type; - u32 rsvd3; - u32 rsvd4; - u32 rsvd5; - u32 rsvd6; - u32 pkt_info; + __le32 status; + __le32 vlan_len; + __le32 offload_type; + __le32 rsvd3; + __le32 rsvd4; + __le32 rsvd5; + __le32 rsvd6; + __le32 pkt_info; }; struct hinic3_rq_wqe { - u32 buf_hi_addr; - u32 buf_lo_addr; - u32 cqe_hi_addr; - u32 cqe_lo_addr; + __le32 buf_hi_addr; + __le32 buf_lo_addr; + __le32 cqe_hi_addr; + __le32 cqe_lo_addr; }; struct hinic3_rx_info { @@ -82,9 +82,23 @@ struct hinic3_rxq { dma_addr_t cqe_start_paddr; } ____cacheline_aligned; +struct hinic3_dyna_rxq_res { + u16 next_to_alloc; + struct hinic3_rx_info *rx_info; + dma_addr_t cqe_start_paddr; + void *cqe_start_vaddr; + struct page_pool *page_pool; +}; + int hinic3_alloc_rxqs(struct net_device *netdev); void hinic3_free_rxqs(struct net_device *netdev); +int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq, + u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res); +void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq, + u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res); +int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq, + u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res); int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget); #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c index 3f7f73430be4..92c43c05e3f2 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c @@ -55,9 +55,9 @@ void hinic3_free_txqs(struct net_device *netdev) static void hinic3_set_buf_desc(struct hinic3_sq_bufdesc *buf_descs, dma_addr_t addr, u32 len) { - buf_descs->hi_addr = upper_32_bits(addr); - buf_descs->lo_addr = lower_32_bits(addr); - buf_descs->len = len; + buf_descs->hi_addr = cpu_to_le32(upper_32_bits(addr)); + buf_descs->lo_addr = cpu_to_le32(lower_32_bits(addr)); + buf_descs->len = cpu_to_le32(len); } static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb, @@ -81,10 +81,10 @@ static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb, dma_info[0].len = skb_headlen(skb); - wqe_desc->hi_addr = upper_32_bits(dma_info[0].dma); - wqe_desc->lo_addr = lower_32_bits(dma_info[0].dma); + wqe_desc->hi_addr = cpu_to_le32(upper_32_bits(dma_info[0].dma)); + wqe_desc->lo_addr = 
cpu_to_le32(lower_32_bits(dma_info[0].dma)); - wqe_desc->ctrl_len = dma_info[0].len; + wqe_desc->ctrl_len = cpu_to_le32(dma_info[0].len); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &(skb_shinfo(skb)->frags[i]); @@ -116,6 +116,7 @@ err_unmap_page: } dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len, DMA_TO_DEVICE); + return err; } @@ -138,6 +139,23 @@ static void hinic3_tx_unmap_skb(struct net_device *netdev, dma_info[0].len, DMA_TO_DEVICE); } +static void free_all_tx_skbs(struct net_device *netdev, u32 sq_depth, + struct hinic3_tx_info *tx_info_arr) +{ + struct hinic3_tx_info *tx_info; + u32 idx; + + for (idx = 0; idx < sq_depth; idx++) { + tx_info = &tx_info_arr[idx]; + if (tx_info->skb) { + hinic3_tx_unmap_skb(netdev, tx_info->skb, + tx_info->dma_info); + dev_kfree_skb_any(tx_info->skb); + tx_info->skb = NULL; + } + } +} + union hinic3_ip { struct iphdr *v4; struct ipv6hdr *v6; @@ -197,7 +215,8 @@ static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task, union hinic3_ip ip; u8 l4_proto; - task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG); + task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, + TUNNEL_FLAG)); ip.hdr = skb_network_header(skb); if (ip.v4->version == 4) { @@ -226,7 +245,7 @@ static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task, } } - task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN); + task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L4_EN)); return 1; } @@ -255,26 +274,28 @@ static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic3_ip *ip, } } -static void hinic3_set_tso_info(struct hinic3_sq_task *task, u32 *queue_info, +static void hinic3_set_tso_info(struct hinic3_sq_task *task, __le32 *queue_info, enum hinic3_l4_offload_type l4_offload, u32 offset, u32 mss) { if (l4_offload == HINIC3_L4_OFFLOAD_TCP) { - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, TSO); - task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN); + *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, TSO)); + task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, + INNER_L4_EN)); } else if (l4_offload == HINIC3_L4_OFFLOAD_UDP) { - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UFO); - task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN); + *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, UFO)); + task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, + INNER_L4_EN)); } /* enable L3 calculation */ - task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L3_EN); + task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L3_EN)); - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF); + *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF)); /* set MSS value */ - *queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK; - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS); + *queue_info &= cpu_to_le32(~SQ_CTRL_QUEUE_INFO_MSS_MASK); + *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(mss, MSS)); } static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto) @@ -284,7 +305,7 @@ static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto) csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0); } -static int hinic3_tso(struct hinic3_sq_task *task, u32 *queue_info, +static int hinic3_tso(struct hinic3_sq_task *task, __le32 *queue_info, struct sk_buff *skb) { enum hinic3_l4_offload_type l4_offload; @@ -305,15 +326,17 @@ static int hinic3_tso(struct hinic3_sq_task *task, u32 *queue_info, if (skb->encapsulation) { u32 gso_type = skb_shinfo(skb)->gso_type; /* L3 checksum is always enabled */ - task->pkt_info0 |= 
SQ_TASK_INFO0_SET(1, OUT_L3_EN); - task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG); + task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, OUT_L3_EN)); + task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, + TUNNEL_FLAG)); l4.hdr = skb_transport_header(skb); ip.hdr = skb_network_header(skb); if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP); - task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L4_EN); + task->pkt_info0 |= + cpu_to_le32(SQ_TASK_INFO0_SET(1, OUT_L4_EN)); } ip.hdr = skb_inner_network_header(skb); @@ -343,13 +366,14 @@ static void hinic3_set_vlan_tx_offload(struct hinic3_sq_task *task, * 2=select TPID2 in IPSU, 3=select TPID3 in IPSU, * 4=select TPID4 in IPSU */ - task->vlan_offload = SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) | - SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) | - SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID); + task->vlan_offload = + cpu_to_le32(SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) | + SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) | + SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID)); } static u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_sq_task *task, - u32 *queue_info, struct hinic3_txq *txq) + __le32 *queue_info, struct hinic3_txq *txq) { u32 offload = 0; int tso_cs_en; @@ -440,39 +464,41 @@ static u16 hinic3_set_wqe_combo(struct hinic3_txq *txq, } static void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo, - u32 queue_info, int nr_descs, u16 owner) + __le32 queue_info, int nr_descs, u16 owner) { struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) { wqe_desc->ctrl_len |= - SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | - SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | - SQ_CTRL_SET(owner, OWNER); + cpu_to_le32(SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | + SQ_CTRL_SET(owner, OWNER)); /* compact wqe queue_info will transfer to chip */ wqe_desc->queue_info = 0; return; } - wqe_desc->ctrl_len |= SQ_CTRL_SET(nr_descs, BUFDESC_NUM) | - SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) | - SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | - SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | - SQ_CTRL_SET(owner, OWNER); + wqe_desc->ctrl_len |= + cpu_to_le32(SQ_CTRL_SET(nr_descs, BUFDESC_NUM) | + SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) | + SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | + SQ_CTRL_SET(owner, OWNER)); wqe_desc->queue_info = queue_info; - wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UC); + wqe_desc->queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, UC)); if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) { wqe_desc->queue_info |= - SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS); + cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS)); } else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) < HINIC3_TX_MSS_MIN) { /* mss should not be less than 80 */ - wqe_desc->queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK; + wqe_desc->queue_info &= + cpu_to_le32(~SQ_CTRL_QUEUE_INFO_MSS_MASK); wqe_desc->queue_info |= - SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS); + cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS)); } } @@ -482,12 +508,13 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb, { struct hinic3_sq_wqe_combo wqe_combo = {}; struct hinic3_tx_info *tx_info; - u32 offload, queue_info = 0; struct hinic3_sq_task task; u16 wqebb_cnt, num_sge; + __le32 queue_info = 0; u16 saved_wq_prod_idx; u16 owner, pi = 0; u8 saved_sq_owner; + u32 offload; int err; if 
(unlikely(skb->len < MIN_SKB_LEN)) { @@ -575,6 +602,7 @@ netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) err_drop_pkt: dev_kfree_skb_any(skb); + return NETDEV_TX_OK; } @@ -624,6 +652,90 @@ void hinic3_flush_txqs(struct net_device *netdev) #define HINIC3_BDS_PER_SQ_WQEBB \ (HINIC3_SQ_WQEBB_SIZE / sizeof(struct hinic3_sq_bufdesc)) +int hinic3_alloc_txqs_res(struct net_device *netdev, u16 num_sq, + u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res) +{ + struct hinic3_dyna_txq_res *tqres; + int idx; + + for (idx = 0; idx < num_sq; idx++) { + tqres = &txqs_res[idx]; + + tqres->tx_info = kcalloc(sq_depth, sizeof(*tqres->tx_info), + GFP_KERNEL); + if (!tqres->tx_info) + goto err_free_tqres; + + tqres->bds = kcalloc(sq_depth * HINIC3_BDS_PER_SQ_WQEBB + + HINIC3_MAX_SQ_SGE, sizeof(*tqres->bds), + GFP_KERNEL); + if (!tqres->bds) { + kfree(tqres->tx_info); + goto err_free_tqres; + } + } + + return 0; + +err_free_tqres: + while (idx > 0) { + idx--; + tqres = &txqs_res[idx]; + + kfree(tqres->bds); + kfree(tqres->tx_info); + } + + return -ENOMEM; +} + +void hinic3_free_txqs_res(struct net_device *netdev, u16 num_sq, + u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res) +{ + struct hinic3_dyna_txq_res *tqres; + int idx; + + for (idx = 0; idx < num_sq; idx++) { + tqres = &txqs_res[idx]; + + free_all_tx_skbs(netdev, sq_depth, tqres->tx_info); + kfree(tqres->bds); + kfree(tqres->tx_info); + } +} + +int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq, + u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_dyna_txq_res *tqres; + struct hinic3_txq *txq; + u16 q_id; + u32 idx; + + for (q_id = 0; q_id < num_sq; q_id++) { + txq = &nic_dev->txqs[q_id]; + tqres = &txqs_res[q_id]; + + txq->q_depth = sq_depth; + txq->q_mask = sq_depth - 1; + + txq->tx_stop_thrs = min(HINIC3_DEFAULT_STOP_THRS, + sq_depth / 20); + txq->tx_start_thrs = min(HINIC3_DEFAULT_START_THRS, + sq_depth / 10); + + txq->tx_info = tqres->tx_info; + for (idx = 0; idx < sq_depth; idx++) + txq->tx_info[idx].dma_info = + &tqres->bds[idx * HINIC3_BDS_PER_SQ_WQEBB]; + + txq->sq = &nic_dev->nic_io->sq[q_id]; + } + + return 0; +} + bool hinic3_tx_poll(struct hinic3_txq *txq, int budget) { struct net_device *netdev = txq->netdev; diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h index 9e505cc19dd5..7e1b872ba752 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h @@ -58,7 +58,7 @@ enum hinic3_tx_offload_type { #define SQ_CTRL_QUEUE_INFO_SET(val, member) \ FIELD_PREP(SQ_CTRL_QUEUE_INFO_##member##_MASK, val) #define SQ_CTRL_QUEUE_INFO_GET(val, member) \ - FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, val) + FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, le32_to_cpu(val)) #define SQ_CTRL_MAX_PLDOFF 221 @@ -77,17 +77,17 @@ enum hinic3_tx_offload_type { FIELD_PREP(SQ_TASK_INFO3_##member##_MASK, val) struct hinic3_sq_wqe_desc { - u32 ctrl_len; - u32 queue_info; - u32 hi_addr; - u32 lo_addr; + __le32 ctrl_len; + __le32 queue_info; + __le32 hi_addr; + __le32 lo_addr; }; struct hinic3_sq_task { - u32 pkt_info0; - u32 ip_identify; - u32 rsvd; - u32 vlan_offload; + __le32 pkt_info0; + __le32 ip_identify; + __le32 rsvd; + __le32 vlan_offload; }; struct hinic3_sq_wqe_combo { @@ -125,9 +125,21 @@ struct hinic3_txq { struct hinic3_io_queue *sq; } ____cacheline_aligned; +struct hinic3_dyna_txq_res { + struct hinic3_tx_info *tx_info; + struct 
hinic3_dma_info *bds; +}; + int hinic3_alloc_txqs(struct net_device *netdev); void hinic3_free_txqs(struct net_device *netdev); +int hinic3_alloc_txqs_res(struct net_device *netdev, u16 num_sq, + u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res); +void hinic3_free_txqs_res(struct net_device *netdev, u16 num_sq, + u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res); +int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq, + u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res); + netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev); bool hinic3_tx_poll(struct hinic3_txq *txq, int budget); void hinic3_flush_txqs(struct net_device *netdev); diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c index 2ac7efcd1365..bc3ffdc25cf6 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c @@ -6,6 +6,110 @@ #include "hinic3_hwdev.h" #include "hinic3_wq.h" +#define WQ_MIN_DEPTH 64 +#define WQ_MAX_DEPTH 65536 +#define WQ_PAGE_ADDR_SIZE sizeof(u64) +#define WQ_MAX_NUM_PAGES (HINIC3_MIN_PAGE_SIZE / WQ_PAGE_ADDR_SIZE) + +static int wq_init_wq_block(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq) +{ + struct hinic3_queue_pages *qpages = &wq->qpages; + int i; + + if (hinic3_wq_is_0_level_cla(wq)) { + wq->wq_block_paddr = qpages->pages[0].align_paddr; + wq->wq_block_vaddr = qpages->pages[0].align_vaddr; + + return 0; + } + + if (wq->qpages.num_pages > WQ_MAX_NUM_PAGES) { + dev_err(hwdev->dev, "wq num_pages exceed limit: %lu\n", + WQ_MAX_NUM_PAGES); + return -EFAULT; + } + + wq->wq_block_vaddr = dma_alloc_coherent(hwdev->dev, + HINIC3_MIN_PAGE_SIZE, + &wq->wq_block_paddr, + GFP_KERNEL); + if (!wq->wq_block_vaddr) + return -ENOMEM; + + for (i = 0; i < qpages->num_pages; i++) + wq->wq_block_vaddr[i] = cpu_to_be64(qpages->pages[i].align_paddr); + + return 0; +} + +static int wq_alloc_pages(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq) +{ + int err; + + err = hinic3_queue_pages_alloc(hwdev, &wq->qpages, 0); + if (err) + return err; + + err = wq_init_wq_block(hwdev, wq); + if (err) { + hinic3_queue_pages_free(hwdev, &wq->qpages); + return err; + } + + return 0; +} + +static void wq_free_pages(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq) +{ + if (!hinic3_wq_is_0_level_cla(wq)) + dma_free_coherent(hwdev->dev, + HINIC3_MIN_PAGE_SIZE, + wq->wq_block_vaddr, + wq->wq_block_paddr); + + hinic3_queue_pages_free(hwdev, &wq->qpages); +} + +int hinic3_wq_create(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq, + u32 q_depth, u16 wqebb_size) +{ + u32 wq_page_size; + + if (q_depth < WQ_MIN_DEPTH || q_depth > WQ_MAX_DEPTH || + !is_power_of_2(q_depth) || !is_power_of_2(wqebb_size)) { + dev_err(hwdev->dev, "Invalid WQ: q_depth %u, wqebb_size %u\n", + q_depth, wqebb_size); + return -EINVAL; + } + + wq_page_size = ALIGN(hwdev->wq_page_size, HINIC3_MIN_PAGE_SIZE); + + memset(wq, 0, sizeof(*wq)); + wq->q_depth = q_depth; + wq->idx_mask = q_depth - 1; + + hinic3_queue_pages_init(&wq->qpages, q_depth, wq_page_size, wqebb_size); + + return wq_alloc_pages(hwdev, wq); +} + +void hinic3_wq_destroy(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq) +{ + wq_free_pages(hwdev, wq); +} + +void hinic3_wq_reset(struct hinic3_wq *wq) +{ + struct hinic3_queue_pages *qpages = &wq->qpages; + u16 pg_idx; + + wq->cons_idx = 0; + wq->prod_idx = 0; + + for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) + memset(qpages->pages[pg_idx].align_vaddr, 0, qpages->page_size); +} + void 
hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq, u16 num_wqebbs, u16 *prod_idx, struct hinic3_sq_bufdesc **first_part_wqebbs, @@ -27,3 +131,8 @@ void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq, *second_part_wqebbs = get_q_element(&wq->qpages, idx, NULL); } } + +bool hinic3_wq_is_0_level_cla(const struct hinic3_wq *wq) +{ + return wq->qpages.num_pages == 1; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h index ab37893efd7e..9b3f012bec80 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h @@ -10,10 +10,10 @@ struct hinic3_sq_bufdesc { /* 31-bit length, L2NIC only uses length[17:0] */ - u32 len; - u32 rsvd; - u32 hi_addr; - u32 lo_addr; + __le32 len; + __le32 rsvd; + __le32 hi_addr; + __le32 lo_addr; }; /* Work queue is used to submit elements (tx, rx, cmd) to hw. @@ -59,6 +59,7 @@ static inline void *hinic3_wq_get_one_wqebb(struct hinic3_wq *wq, u16 *pi) { *pi = wq->prod_idx & wq->idx_mask; wq->prod_idx++; + return get_q_element(&wq->qpages, *pi, NULL); } @@ -67,10 +68,20 @@ static inline void hinic3_wq_put_wqebbs(struct hinic3_wq *wq, u16 num_wqebbs) { wq->cons_idx += num_wqebbs; } +static inline u64 hinic3_wq_get_first_wqe_page_addr(const struct hinic3_wq *wq) +{ + return wq->qpages.pages[0].align_paddr; +} + +int hinic3_wq_create(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq, + u32 q_depth, u16 wqebb_size); +void hinic3_wq_destroy(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq); +void hinic3_wq_reset(struct hinic3_wq *wq); void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq, u16 num_wqebbs, u16 *prod_idx, struct hinic3_sq_bufdesc **first_part_wqebbs, struct hinic3_sq_bufdesc **second_part_wqebbs, u16 *first_part_wqebbs_num); +bool hinic3_wq_is_0_level_cla(const struct hinic3_wq *wq); #endif
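For context, a minimal sketch of how the work-queue API added in hinic3_wq.c/hinic3_wq.h is meant to be driven. This is not part of the patch: the function name example_wq_roundtrip and the depth/WQEBB-size values are illustrative assumptions; only the hinic3_wq_* calls and their signatures come from the diff above.

#include <linux/string.h>

#include "hinic3_hwdev.h"
#include "hinic3_wq.h"

/* Illustrative only: create a WQ, produce one WQEBB, retire it, tear down. */
static int example_wq_roundtrip(struct hinic3_hwdev *hwdev)
{
	struct hinic3_wq wq;
	void *wqebb;
	u16 pi;
	int err;

	/* q_depth and wqebb_size must both be powers of two; 256 and 64 are
	 * arbitrary valid choices within the WQ_MIN_DEPTH/WQ_MAX_DEPTH bounds
	 * checked by hinic3_wq_create().
	 */
	err = hinic3_wq_create(hwdev, &wq, 256, 64);
	if (err)
		return err;

	/* Producer side: claim one WQEBB; pi is the producer index a real
	 * caller would encode into the doorbell it rings for the hardware.
	 */
	wqebb = hinic3_wq_get_one_wqebb(&wq, &pi);
	memset(wqebb, 0, 64);	/* a real caller fills in a descriptor here */

	/* Completion side: return the WQEBB once hardware has consumed it;
	 * this advances the consumer index.
	 */
	hinic3_wq_put_wqebbs(&wq, 1);

	hinic3_wq_destroy(hwdev, &wq);

	return 0;
}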