author:    James Smart <jsmart2021@gmail.com>  2019-08-15 02:57:09 +0300
committer: Martin K. Petersen <martin.petersen@oracle.com>  2019-08-20 05:41:12 +0300
commit:    d79c9e9d4b3d9330ee38f392a7c98e0fc494f7f8
tree:      088e0dc43e6f8415f9bd529e49a3e072c89631d9  /drivers/scsi/lpfc/lpfc_sli.c
parent:    e62245d923caebc02582b12ce861c3d780b4106f
scsi: lpfc: Support dynamic unbounded SGL lists on G7 hardware.
Typical SLI-4 hardware supports up to two 4KB pages registered per XRI to
contain the exchange's scatter/gather list (SGL). This caps the number of SGL
elements the list can hold, and there is no extension mechanism to grow the
list beyond those two pages.
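(For illustration only: assuming the usual 16-byte SLI-4 SGE, two 4KB pages
hold at most (2 x 4096) / 16 = 512 SGL entries.)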
The G7 hardware adds an SGE type that allows the SGL to be vectored to a
different scatter/gather list segment, and that segment can contain an SGE
that points to yet another segment, and so on. The initial segment must still
be pre-registered for the XRI, but it can now be much smaller (256 bytes)
because it can be grown dynamically. The smaller allocation handles the SG
list for most normal I/O, while the dynamic chaining allows it to support
many megabytes when needed.
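As a rough sketch of the chaining concept (hypothetical structure and field
names for illustration, not the driver's actual SGE definitions):

  /* Hypothetical layout for illustration; not lpfc's real SGE structures. */
  #include <stdint.h>

  struct example_sge {
          uint64_t addr;    /* DMA address: data buffer, or next segment if LINK */
          uint32_t length;  /* length of the data buffer in bytes */
          uint32_t type;    /* EXAMPLE_SGE_DATA or EXAMPLE_SGE_LINK */
  };

  #define EXAMPLE_SGE_DATA 0
  #define EXAMPLE_SGE_LINK 1  /* "vector" to another segment, as added on G7 */

  /*
   * A 256-byte segment holds 16 such SGEs.  When an I/O needs more entries,
   * the last SGE is written as a LINK pointing at a further 256-byte segment,
   * which may itself end in another LINK, and so on.
   */
  struct example_sgl_segment {
          struct example_sge sge[256 / sizeof(struct example_sge)];
  };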
The implementation creates a pool of such "segments", initially sized to hold
the small initial segment for each XRI. If an I/O requires additional
segments, they are allocated from the pool; if the pool has no more segments,
it is grown to cover the new demand. After the I/O completes, the additional
segments are returned to the pool for use by other I/Os. Once allocated,
additional segments are not released, on the assumption that if they were
needed once, they will be needed again. Pools are kept per hardware queue,
which is typically 1:1 with a CPU but may be shared by multiple CPUs.
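A minimal sketch of how a caller might use the new pool helpers (the wrapper
functions and error handling here are hypothetical; only lpfc_get_sgl_per_hdwq
and lpfc_put_sgl_per_hdwq are added by this patch):

  /* Hypothetical caller, for illustration only. */
  static int example_extend_sgl(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
  {
          struct sli4_hybrid_sgl *sgl_xtra;

          /* Pull one extra 256-byte segment from this hdwq's pool
           * (the helper grows the pool itself if it is empty).
           */
          sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
          if (!sgl_xtra)
                  return -ENOMEM;

          /* ... chain sgl_xtra->dma_phys_sgl into the XRI's SGL here ... */
          return 0;
  }

  static void example_io_done(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
  {
          /* Return any extra segments to the hdwq pool; they stay allocated
           * for reuse by later I/Os on the same hardware queue.
           */
          if (!list_empty(&lpfc_cmd->dma_sgl_xtra_list))
                  lpfc_put_sgl_per_hdwq(phba, lpfc_cmd);
  }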
The switch to the smaller initial allocation significantly reduces the
driver's memory footprint, which now only grows if large I/Os are issued.
With the several thousand XRIs an adapter supports, the 8KB-to-256B reduction
can conserve 32MB or more.
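(For example, assuming roughly 4K XRIs: the per-XRI saving is 8192 - 256 =
7936 bytes, and 4096 x 7936 bytes is about 32MB.)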
It has been observed with per-CPU resource pools that a resource allocated on
CPU A may be put back on CPU B. While the get routines are distributed
evenly, only a limited subset of CPUs may be handling the put routines. This
can put a strain on the lpfc_put_cmd_rsp_buf_per_cpu routine because all the
resources are being put back on that limited subset of CPUs.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  292
1 file changed, 292 insertions, 0 deletions
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 940dd82a265b..f4beb9104d3a 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -20233,6 +20233,13 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
 		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
 				       iflag);
 	}
+
+	if (phba->cfg_xpsgl && !phba->nvmet_support &&
+	    !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
+		lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+
+	if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
+		lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
 }
 
 /**
@@ -20447,3 +20454,288 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
 
 	return lpfc_cmd;
 }
+
+/**
+ * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure to append the SGL chunk
+ *
+ * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
+ * and will allocate an SGL chunk if the pool is empty.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to sli4_hybrid_sgl - Success
+ **/
+struct sli4_hybrid_sgl *
+lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
+{
+	struct sli4_hybrid_sgl *list_entry = NULL;
+	struct sli4_hybrid_sgl *tmp = NULL;
+	struct sli4_hybrid_sgl *allocated_sgl = NULL;
+	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+	struct list_head *buf_list = &hdwq->sgl_list;
+
+	spin_lock_irq(&hdwq->hdwq_lock);
+
+	if (likely(!list_empty(buf_list))) {
+		/* break off 1 chunk from the sgl_list */
+		list_for_each_entry_safe(list_entry, tmp,
+					 buf_list, list_node) {
+			list_move_tail(&list_entry->list_node,
+				       &lpfc_buf->dma_sgl_xtra_list);
+			break;
+		}
+	} else {
+		/* allocate more */
+		spin_unlock_irq(&hdwq->hdwq_lock);
+		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
+				   cpu_to_node(smp_processor_id()));
+		if (!tmp) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+					"8353 error kmalloc memory for HDWQ "
+					"%d %s\n",
+					lpfc_buf->hdwq_no, __func__);
+			return NULL;
+		}
+
+		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
+					      GFP_ATOMIC, &tmp->dma_phys_sgl);
+		if (!tmp->dma_sgl) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+					"8354 error pool_alloc memory for HDWQ "
+					"%d %s\n",
+					lpfc_buf->hdwq_no, __func__);
+			kfree(tmp);
+			return NULL;
+		}
+
+		spin_lock_irq(&hdwq->hdwq_lock);
+		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
+	}
+
+	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
+					struct sli4_hybrid_sgl,
+					list_node);
+
+	spin_unlock_irq(&hdwq->hdwq_lock);
+
+	return allocated_sgl;
+}
+
+/**
+ * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure with the SGL chunk
+ *
+ * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
+ *
+ * Return codes:
+ *   0 - Success
+ *   -EINVAL - Error
+ **/
+int
+lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
+{
+	int rc = 0;
+	struct sli4_hybrid_sgl *list_entry = NULL;
+	struct sli4_hybrid_sgl *tmp = NULL;
+	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+	struct list_head *buf_list = &hdwq->sgl_list;
+
+	spin_lock_irq(&hdwq->hdwq_lock);
+
+	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
+		list_for_each_entry_safe(list_entry, tmp,
+					 &lpfc_buf->dma_sgl_xtra_list,
+					 list_node) {
+			list_move_tail(&list_entry->list_node,
+				       buf_list);
+		}
+	} else {
+		rc = -EINVAL;
+	}
+
+	spin_unlock_irq(&hdwq->hdwq_lock);
+	return rc;
+}
+
+/**
+ * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
+ * @phba: phba object
+ * @hdwq: hdwq to cleanup sgl buff resources on
+ *
+ * This routine frees all SGL chunks of hdwq SGL chunk pool.
+ *
+ * Return codes:
+ *   None
+ **/
+void
+lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
+		       struct lpfc_sli4_hdw_queue *hdwq)
+{
+	struct list_head *buf_list = &hdwq->sgl_list;
+	struct sli4_hybrid_sgl *list_entry = NULL;
+	struct sli4_hybrid_sgl *tmp = NULL;
+
+	spin_lock_irq(&hdwq->hdwq_lock);
+
+	/* Free sgl pool */
+	list_for_each_entry_safe(list_entry, tmp,
+				 buf_list, list_node) {
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+			      list_entry->dma_sgl,
+			      list_entry->dma_phys_sgl);
+		list_del(&list_entry->list_node);
+		kfree(list_entry);
+	}
+
+	spin_unlock_irq(&hdwq->hdwq_lock);
+}
+
+/**
+ * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
+ *
+ * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
+ * and will allocate an CMD/RSP buffer if the pool is empty.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to fcp_cmd_rsp_buf - Success
+ **/
+struct fcp_cmd_rsp_buf *
+lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+			      struct lpfc_io_buf *lpfc_buf)
+{
+	struct fcp_cmd_rsp_buf *list_entry = NULL;
+	struct fcp_cmd_rsp_buf *tmp = NULL;
+	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
+	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+
+	spin_lock_irq(&hdwq->hdwq_lock);
+
+	if (likely(!list_empty(buf_list))) {
+		/* break off 1 chunk from the list */
+		list_for_each_entry_safe(list_entry, tmp,
+					 buf_list,
+					 list_node) {
+			list_move_tail(&list_entry->list_node,
+				       &lpfc_buf->dma_cmd_rsp_list);
+			break;
+		}
+	} else {
+		/* allocate more */
+		spin_unlock_irq(&hdwq->hdwq_lock);
+		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
+				   cpu_to_node(smp_processor_id()));
+		if (!tmp) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+					"8355 error kmalloc memory for HDWQ "
+					"%d %s\n",
+					lpfc_buf->hdwq_no, __func__);
+			return NULL;
+		}
+
+		tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
+					       GFP_ATOMIC,
+					       &tmp->fcp_cmd_rsp_dma_handle);
+
+		if (!tmp->fcp_cmnd) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+					"8356 error pool_alloc memory for HDWQ "
+					"%d %s\n",
+					lpfc_buf->hdwq_no, __func__);
+			kfree(tmp);
+			return NULL;
+		}
+
+		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
+				sizeof(struct fcp_cmnd));
+
+		spin_lock_irq(&hdwq->hdwq_lock);
+		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
+	}
+
+	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
+					struct fcp_cmd_rsp_buf,
+					list_node);
+
+	spin_unlock_irq(&hdwq->hdwq_lock);
+
+	return allocated_buf;
+}
+
+/**
+ * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure with the CMD/RSP buf
+ *
+ * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool.
+ *
+ * Return codes:
+ *   0 - Success
+ *   -EINVAL - Error
+ **/
+int
+lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+			      struct lpfc_io_buf *lpfc_buf)
+{
+	int rc = 0;
+	struct fcp_cmd_rsp_buf *list_entry = NULL;
+	struct fcp_cmd_rsp_buf *tmp = NULL;
+	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+
+	spin_lock_irq(&hdwq->hdwq_lock);
+
+	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
+		list_for_each_entry_safe(list_entry, tmp,
+					 &lpfc_buf->dma_cmd_rsp_list,
+					 list_node) {
+			list_move_tail(&list_entry->list_node,
+				       buf_list);
+		}
+	} else {
+		rc = -EINVAL;
+	}
+
+	spin_unlock_irq(&hdwq->hdwq_lock);
+	return rc;
+}
+
+/**
+ * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
+ * @phba: phba object
+ * @hdwq: hdwq to cleanup cmd rsp buff resources on
+ *
+ * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
+ *
+ * Return codes:
+ *   None
+ **/
+void
+lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+			       struct lpfc_sli4_hdw_queue *hdwq)
+{
+	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+	struct fcp_cmd_rsp_buf *list_entry = NULL;
+	struct fcp_cmd_rsp_buf *tmp = NULL;
+
+	spin_lock_irq(&hdwq->hdwq_lock);
+
+	/* Free cmd_rsp buf pool */
+	list_for_each_entry_safe(list_entry, tmp,
+				 buf_list,
+				 list_node) {
+		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
+			      list_entry->fcp_cmnd,
+			      list_entry->fcp_cmd_rsp_dma_handle);
+		list_del(&list_entry->list_node);
+		kfree(list_entry);
+	}
+
+	spin_unlock_irq(&hdwq->hdwq_lock);
+}