author      Quinn Tran <qutran@marvell.com>                     2022-12-22 07:39:30 +0300
committer   Martin K. Petersen <martin.petersen@oracle.com>     2023-01-12 07:48:26 +0300
commit      1f8f9c34127e9fae20c29a2b57f56fd47dbb43e4 (patch)
tree        136bd8d34344d88818f649b691d0f3e17f854745 /drivers/scsi/qla2xxx/qla_mid.c
parent      129a7c40294fd4ab9e9bccf76e8002818f492d8a (diff)
download    linux-1f8f9c34127e9fae20c29a2b57f56fd47dbb43e4.tar.xz
scsi: qla2xxx: edif: Reduce memory usage during low I/O
For edif, each I/O requires a secondary buffer to carry the FCP
cmnd. During periods of high traffic these buffers are cached in the
qpair; as traffic dies down, they are trimmed as needed. If traffic drops
to none over two consecutive intervals, the buffers are trimmed further.

Free FCP cmnd buffers to reduce memory usage during slow I/O periods.
Signed-off-by: Quinn Tran <qutran@marvell.com>
Signed-off-by: Nilesh Javali <njavali@marvell.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
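For context, the trimming heuristic described in the commit message works roughly as follows: on a down trend, about 10% of the surplus (cached buffers beyond the most recent usage peak) is released per interval, with a minimum of one buffer, and two consecutive intervals with no I/O release the whole cache. The standalone sketch below only mirrors the arithmetic of __qla_adjust_buf()/qla_trim_buf() from the diff; trim_count() and the sample figures are illustrative, not driver API.

#include <stdio.h>

/*
 * Illustrative sketch of the trim-size calculation; mirrors the math in
 * __qla_adjust_buf()/qla_trim_buf() in the diff below. Not part of the driver.
 */
static unsigned int trim_count(unsigned int num_alloc, unsigned int prev_max,
			       unsigned int max_used)
{
	unsigned int trim;

	if (!prev_max && !max_used)
		return num_alloc;		/* two idle intervals: free everything */

	if (prev_max > max_used && num_alloc > max_used) {
		/* down trend: release ~10% of the surplus, at least one buffer */
		trim = num_alloc - max_used;
		trim = (trim * 10) / 100;
		return trim ? trim : 1;
	}

	return 0;				/* steady or rising usage: keep the cache */
}

int main(void)
{
	/* 200 buffers cached, peak usage fell from 150 to 80 in flight */
	printf("trim %u of 200\n", trim_count(200, 150, 80));	/* (200 - 80) * 10 / 100 = 12 */
	/* two consecutive intervals with no I/O at all */
	printf("trim %u of 200\n", trim_count(200, 0, 0));	/* 200 */
	return 0;
}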
Diffstat (limited to 'drivers/scsi/qla2xxx/qla_mid.c')
-rw-r--r--   drivers/scsi/qla2xxx/qla_mid.c   94
1 file changed, 94 insertions(+), 0 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 5976a2f036e6..c6ca39b8e23d 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1170,6 +1170,7 @@ again:
 
 		dsc->buf = qp->buf_pool.buf_array[tag] = buf;
 		dsc->buf_dma = qp->buf_pool.dma_array[tag] = buf_dma;
+		qp->buf_pool.num_alloc++;
 	} else {
 		dsc->buf = qp->buf_pool.buf_array[tag];
 		dsc->buf_dma = qp->buf_pool.dma_array[tag];
@@ -1185,14 +1186,107 @@ again:
 
 	return 0;
 }
+void qla_trim_buf(struct qla_qpair *qp, u16 trim)
+{
+	int i, j;
+	struct qla_hw_data *ha = qp->vha->hw;
+
+	if (!trim)
+		return;
+
+	for (i = 0; i < trim; i++) {
+		j = qp->buf_pool.num_alloc - 1;
+		if (test_bit(j, qp->buf_pool.buf_map)) {
+			ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x300b,
+			       "QP id(%d): trim active buf[%d]. Remain %d bufs\n",
+			       qp->id, j, qp->buf_pool.num_alloc);
+			return;
+		}
+
+		if (qp->buf_pool.buf_array[j]) {
+			dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[j],
+				      qp->buf_pool.dma_array[j]);
+			qp->buf_pool.buf_array[j] = NULL;
+			qp->buf_pool.dma_array[j] = 0;
+		}
+		qp->buf_pool.num_alloc--;
+		if (!qp->buf_pool.num_alloc)
+			break;
+	}
+	ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x3010,
+	       "QP id(%d): trimmed %d bufs. Remain %d bufs\n",
+	       qp->id, trim, qp->buf_pool.num_alloc);
+}
+
+void __qla_adjust_buf(struct qla_qpair *qp)
+{
+	u32 trim;
+
+	qp->buf_pool.take_snapshot = 0;
+	qp->buf_pool.prev_max = qp->buf_pool.max_used;
+	qp->buf_pool.max_used = qp->buf_pool.num_active;
+
+	if (qp->buf_pool.prev_max > qp->buf_pool.max_used &&
+	    qp->buf_pool.num_alloc > qp->buf_pool.max_used) {
+		/* down trend */
+		trim = qp->buf_pool.num_alloc - qp->buf_pool.max_used;
+		trim = (trim * 10) / 100;
+		trim = trim ? trim : 1;
+		qla_trim_buf(qp, trim);
+	} else if (!qp->buf_pool.prev_max && !qp->buf_pool.max_used) {
+		/* 2 periods of no io */
+		qla_trim_buf(qp, qp->buf_pool.num_alloc);
+	}
+}
 
 /* it is assume qp->qp_lock is held at this point */
 void qla_put_buf(struct qla_qpair *qp, struct qla_buf_dsc *dsc)
 {
 	if (dsc->tag == TAG_FREED)
 		return;
+	lockdep_assert_held(qp->qp_lock_ptr);
 
 	clear_bit(dsc->tag, qp->buf_pool.buf_map);
 	qp->buf_pool.num_active--;
 	dsc->tag = TAG_FREED;
+
+	if (qp->buf_pool.take_snapshot)
+		__qla_adjust_buf(qp);
+}
+
+#define EXPIRE	(60 * HZ)
+void qla_adjust_buf(struct scsi_qla_host *vha)
+{
+	unsigned long flags;
+	int i;
+	struct qla_qpair *qp;
+
+	if (vha->vp_idx)
+		return;
+
+	if (!vha->buf_expired) {
+		vha->buf_expired = jiffies + EXPIRE;
+		return;
+	}
+	if (time_before(jiffies, vha->buf_expired))
+		return;
+
+	vha->buf_expired = jiffies + EXPIRE;
+
+	for (i = 0; i < vha->hw->num_qpairs; i++) {
+		qp = vha->hw->queue_pair_map[i];
+		if (!qp)
+			continue;
+		if (!qp->buf_pool.num_alloc)
+			continue;
+
+		if (qp->buf_pool.take_snapshot) {
+			/* no io has gone through in the last EXPIRE period */
+			spin_lock_irqsave(qp->qp_lock_ptr, flags);
+			__qla_adjust_buf(qp);
+			spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+		} else {
+			qp->buf_pool.take_snapshot = 1;
+		}
+	}
 }
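Design note: qla_adjust_buf() in the hunk above rate-limits adjustments to one per EXPIRE (60 s) window and is presumably driven from a periodic driver timer (its caller is outside this file's diffstat). The take_snapshot flag acts as a handshake: the timer arms it, and either a completing I/O (qla_put_buf()) or, if nothing completed, the next timer tick performs the adjustment, so two back-to-back idle windows leave the cache fully freed. The toy model below replays that cycle for a single pool that stays idle; struct pool, adjust(), idle_tick() and the numbers are illustrative stand-ins, not driver code.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the EXPIRE/take_snapshot cycle from the patch; not driver code. */
struct pool {
	unsigned int num_alloc;		/* buffers cached in the qpair */
	unsigned int num_active;	/* buffers currently in flight */
	unsigned int prev_max, max_used;
	bool take_snapshot;
};

static void adjust(struct pool *p)
{
	unsigned int trim;

	p->take_snapshot = false;
	p->prev_max = p->max_used;
	p->max_used = p->num_active;

	if (p->prev_max > p->max_used && p->num_alloc > p->max_used) {
		/* down trend: shave ~10% of the surplus, at least one buffer */
		trim = (p->num_alloc - p->max_used) * 10 / 100;
		p->num_alloc -= trim ? trim : 1;
	} else if (!p->prev_max && !p->max_used) {
		/* two consecutive idle windows: drop the whole cache */
		p->num_alloc = 0;
	}
}

/* One EXPIRE tick during which no I/O completed (qla_put_buf() never ran). */
static void idle_tick(struct pool *p)
{
	if (p->take_snapshot)
		adjust(p);			/* flag survived the window: adjust now */
	else
		p->take_snapshot = true;	/* arm; a completing I/O would adjust instead */
}

int main(void)
{
	/* as if a burst just ended: 64 buffers cached, recent peak of 48 in flight */
	struct pool p = { .num_alloc = 64, .max_used = 48 };

	for (int tick = 1; tick <= 4; tick++) {
		idle_tick(&p);
		printf("tick %d: cached=%u\n", tick, p.num_alloc);	/* 64, 58, 58, 0 */
	}
	return 0;
}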