author    | Mintz, Yuval <Yuval.Mintz@cavium.com> | 2016-11-29 17:47:06 +0300
committer | David S. Miller <davem@davemloft.net> | 2016-11-30 22:32:04 +0300
commit    | 3da7a37ae6886cfba9ef35428eb976fc2ef561fa (patch)
tree      | 5ab07de3f47dab8e4d1ea95c822d373e168beb2e /drivers/net/ethernet/qlogic/qed/qed_vf.c
parent    | 567b3c127a79277bac31a9609734b355d30e7905 (diff)
download  | linux-3da7a37ae6886cfba9ef35428eb976fc2ef561fa.tar.xz
qed*: Handle-based L2-queues.
The driver needs to maintain several FW/HW indices for each one of
its queues. Currently, that mapping is done by qed, which keeps rx/tx
arrays of so-called hw-cids, populating them whenever a new queue is
opened and clearing them when the queue is destroyed.
This maintenance is far from ideal - there's no real reason why
qed needs to maintain such a data structure. It becomes even worse
given that the PF's queues and its child VFs' queues are all mapped
into the same data structure.
As a by-product, the set of parameters an interface needs to supply for
the queue APIs is non-trivial, and some of the variables in the API
structures have different meanings depending on their exact place
in the configuration flow.
This patch re-organizes the way L2 queues are configured and maintained.
In short (a caller-side sketch follows the lists below):
- Required parameters for queue init are now well-defined.
- Qed would allocate a queue-cid based on the provided parameters;
  upon successful initialization, it would return a handle to the caller.
- The queue-handle would be maintained by the entity requesting the
  queue-init, not necessarily qed.
- All further queue APIs [update, destroy] would use the opaque
  handle as the reference for the queue instead of various indices.
The possible owners of such handles:
- PF queues [qede] - complete handles based on provided configuration.
- VF queues [qede] - fw-context-less handles, containing only relative
  information; only the PF-side would need the absolute indices
  for configuration, so they're omitted here.
- VF queues [qed, PF-side] - complete handles based on VF initialization.
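
To make the new contract concrete, here is a minimal caller-side sketch of the
VF Rx-queue lifecycle. It is not part of the patch: the vf_rxq structure and the
open/close helpers are hypothetical, and the queue-cid is assumed to have been
allocated elsewhere; only the qed_vf_pf_rxq_start/stop signatures are taken from
the diff below.

/* Hypothetical caller-side wrapper - illustrates the ownership model only.
 * The opaque handle is created before the start request, stays with the
 * caller, and is passed back verbatim for every later queue operation;
 * qed no longer tracks it in a global array of hw-cids.
 */
struct vf_rxq {
        struct qed_queue_cid *cid;      /* opaque handle owned by the caller */
        void __iomem *prod;             /* producer address learned at start */
};

static int vf_rxq_open(struct qed_hwfn *p_hwfn, struct vf_rxq *rxq,
                       struct qed_queue_cid *cid,      /* allocated elsewhere */
                       u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr,
                       dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
        int rc;

        rxq->cid = cid;
        rc = qed_vf_pf_rxq_start(p_hwfn, rxq->cid, bd_max_bytes,
                                 bd_chain_phys_addr, cqe_pbl_addr,
                                 cqe_pbl_size, &rxq->prod);
        if (rc)
                rxq->cid = NULL;

        return rc;
}

static int vf_rxq_close(struct qed_hwfn *p_hwfn, struct vf_rxq *rxq)
{
        /* The same opaque handle identifies the queue on teardown. */
        return qed_vf_pf_rxq_stop(p_hwfn, rxq->cid, false);
}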
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_vf.c')
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_vf.c | 90
1 file changed, 44 insertions(+), 46 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 3c0633642f4c..60b31a8ede73 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -388,18 +388,18 @@ free_p_iov:
 #define MSTORM_QZONE_START(dev)   (TSTORM_QZONE_START + \
                                    (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
 
-int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
-                        u8 rx_qid,
-                        u16 sb,
-                        u8 sb_index,
-                        u16 bd_max_bytes,
-                        dma_addr_t bd_chain_phys_addr,
-                        dma_addr_t cqe_pbl_addr,
-                        u16 cqe_pbl_size, void __iomem **pp_prod)
+int
+qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+                    struct qed_queue_cid *p_cid,
+                    u16 bd_max_bytes,
+                    dma_addr_t bd_chain_phys_addr,
+                    dma_addr_t cqe_pbl_addr,
+                    u16 cqe_pbl_size, void __iomem **pp_prod)
 {
         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
         struct pfvf_start_queue_resp_tlv *resp;
         struct vfpf_start_rxq_tlv *req;
+        u8 rx_qid = p_cid->rel.queue_id;
         int rc;
 
         /* clear mailbox and prep first tlv */
@@ -409,21 +409,22 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
         req->cqe_pbl_addr = cqe_pbl_addr;
         req->cqe_pbl_size = cqe_pbl_size;
         req->rxq_addr = bd_chain_phys_addr;
-        req->hw_sb = sb;
-        req->sb_index = sb_index;
+        req->hw_sb = p_cid->rel.sb;
+        req->sb_index = p_cid->rel.sb_idx;
         req->bd_max_bytes = bd_max_bytes;
         req->stat_id = -1;
 
         /* If PF is legacy, we'll need to calculate producers ourselves
          * as well as clean them.
          */
-        if (pp_prod && p_iov->b_pre_fp_hsi) {
+        if (p_iov->b_pre_fp_hsi) {
                 u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
                 u32 init_prod_val = 0;
 
-                *pp_prod = (u8 __iomem *)p_hwfn->regview +
-                           MSTORM_QZONE_START(p_hwfn->cdev) +
-                           hw_qid * MSTORM_QZONE_SIZE;
+                *pp_prod = (u8 __iomem *)
+                    p_hwfn->regview +
+                    MSTORM_QZONE_START(p_hwfn->cdev) +
+                    hw_qid * MSTORM_QZONE_SIZE;
 
                 /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
                 __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
@@ -444,7 +445,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
         }
 
         /* Learn the address of the producer from the response */
-        if (pp_prod && !p_iov->b_pre_fp_hsi) {
+        if (!p_iov->b_pre_fp_hsi) {
                 u32 init_prod_val = 0;
 
                 *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
@@ -462,7 +463,8 @@ exit:
         return rc;
 }
 
-int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+                       struct qed_queue_cid *p_cid, bool cqe_completion)
 {
         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
         struct vfpf_stop_rxqs_tlv *req;
@@ -472,7 +474,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
         /* clear mailbox and prep first tlv */
         req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
 
-        req->rx_qid = rx_qid;
+        req->rx_qid = p_cid->rel.queue_id;
         req->num_rxqs = 1;
         req->cqe_completion = cqe_completion;
 
@@ -496,28 +498,28 @@ exit:
         return rc;
 }
 
-int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
-                        u16 tx_queue_id,
-                        u16 sb,
-                        u8 sb_index,
-                        dma_addr_t pbl_addr,
-                        u16 pbl_size, void __iomem **pp_doorbell)
+int
+qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+                    struct qed_queue_cid *p_cid,
+                    dma_addr_t pbl_addr,
+                    u16 pbl_size, void __iomem **pp_doorbell)
 {
         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
         struct pfvf_start_queue_resp_tlv *resp;
         struct vfpf_start_txq_tlv *req;
+        u16 qid = p_cid->rel.queue_id;
         int rc;
 
         /* clear mailbox and prep first tlv */
         req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
 
-        req->tx_qid = tx_queue_id;
+        req->tx_qid = qid;
 
         /* Tx */
         req->pbl_addr = pbl_addr;
         req->pbl_size = pbl_size;
-        req->hw_sb = sb;
-        req->sb_index = sb_index;
+        req->hw_sb = p_cid->rel.sb;
+        req->sb_index = p_cid->rel.sb_idx;
 
         /* add list termination tlv */
         qed_add_tlv(p_hwfn, &p_iov->offset,
@@ -533,33 +535,29 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
                 goto exit;
         }
 
-        if (pp_doorbell) {
-                /* Modern PFs provide the actual offsets, while legacy
-                 * provided only the queue id.
-                 */
-                if (!p_iov->b_pre_fp_hsi) {
-                        *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
-                                       resp->offset;
-                } else {
-                        u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
-                        u32 db_addr;
-
-                        db_addr = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
-                        *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
-                                       db_addr;
-                }
+        /* Modern PFs provide the actual offsets, while legacy
+         * provided only the queue id.
+         */
+        if (!p_iov->b_pre_fp_hsi) {
+                *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
+        } else {
+                u8 cid = p_iov->acquire_resp.resc.cid[qid];
 
-                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-                           "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
-                           tx_queue_id, *pp_doorbell, resp->offset);
+                *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+                               qed_db_addr_vf(cid,
+                                              DQ_DEMS_LEGACY);
         }
+
+        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+                   qid, *pp_doorbell, resp->offset);
 
 exit:
         qed_vf_pf_req_end(p_hwfn, rc);
         return rc;
 }
 
-int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
 {
         struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
         struct vfpf_stop_txqs_tlv *req;
@@ -569,7 +567,7 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
         /* clear mailbox and prep first tlv */
         req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
 
-        req->tx_qid = tx_qid;
+        req->tx_qid = p_cid->rel.queue_id;
         req->num_txqs = 1;
 
         /* add list termination tlv */
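
For orientation when reading the diff above: the only pieces of the handle the VF
path consumes are the relative (VF-local) coordinates, read as p_cid->rel.queue_id,
p_cid->rel.sb and p_cid->rel.sb_idx. A simplified view of that part of the handle
is sketched below; the struct name and field types are assumptions for
illustration, not the actual qed header definition, which also carries absolute
indices and other FW context.

/* Illustrative only - not the real struct qed_queue_cid from the qed headers.
 * Shows just the relative coordinates used by qed_vf.c above.
 */
struct example_queue_cid_rel {
        u16 queue_id;   /* VF-relative queue index        -> req->rx_qid / req->tx_qid */
        u16 sb;         /* status block as seen by the VF -> req->hw_sb                */
        u8  sb_idx;     /* index within that status block -> req->sb_index             */
};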