author		Anup Patel <anup.patel@broadcom.com>	2017-08-22 12:57:00 +0300
committer	Vinod Koul <vinod.koul@intel.com>	2017-08-28 14:14:24 +0300
commit		6df8f913d229e459730fffff7e78ac331140955e (patch)
tree		158de7cfb961b8ca67c987cf38245ef848843dce /drivers/dma/bcm-sba-raid.c
parent		eb67744b9af736073d6dffb7fb139f3f05d7e6a8 (diff)
download	linux-6df8f913d229e459730fffff7e78ac331140955e.tar.xz
dmaengine: bcm-sba-raid: Peek mbox when we have no free requests
When setting up a RAID array on several NVMe disks, we observed that sba_alloc_request() starts failing (because no free requests are left) and RAID array setup becomes very slow.

To improve performance, peek the mbox channels when we have no free requests. This speeds up RAID array setup because mbox requests that have completed, but have not yet been processed by the mbox completion worker, are processed immediately by the mbox channel peek.

Signed-off-by: Anup Patel <anup.patel@broadcom.com>
Reviewed-by: Ray Jui <ray.jui@broadcom.com>
Reviewed-by: Scott Branden <scott.branden@broadcom.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
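For context, a minimal sketch of the peek-on-exhaustion pattern the patch introduces. Everything named demo_* below is a hypothetical stand-in, not driver code; mbox_client_peek_data() is the real mailbox client API from <linux/mailbox_client.h>:

#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/spinlock.h>

/* Hypothetical, simplified request and device types for illustration. */
struct demo_req {
	struct list_head node;
};

struct demo_dev {
	spinlock_t lock;
	struct list_head free_list;
	struct mbox_chan **mchans;
	int mchans_count;
};

/*
 * Allocation with peek-on-exhaustion: when no free request is
 * available, poll every mailbox channel so completions that the
 * completion worker has not handled yet are processed right away,
 * returning requests to free_list sooner.
 */
static struct demo_req *demo_alloc_request(struct demo_dev *dev)
{
	unsigned long flags;
	struct demo_req *req;
	int i;

	spin_lock_irqsave(&dev->lock, flags);
	req = list_first_entry_or_null(&dev->free_list,
				       struct demo_req, node);
	if (req)
		list_del(&req->node);
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!req)
		for (i = 0; i < dev->mchans_count; i++)
			mbox_client_peek_data(dev->mchans[i]);

	return req;
}

The peek happens outside the spinlock on purpose: processing completions may re-acquire the lock to refill the free list, so a caller simply retries the allocation afterwards.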
Diffstat (limited to 'drivers/dma/bcm-sba-raid.c')
-rw-r--r--	drivers/dma/bcm-sba-raid.c | 25 ++++++++++++++++++++-----
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 67c53c691af1..c5baaa37b0fe 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -200,6 +200,14 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
 
 /* ====== General helper routines ===== */
 
+static void sba_peek_mchans(struct sba_device *sba)
+{
+	int mchan_idx;
+
+	for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
+		mbox_client_peek_data(sba->mchans[mchan_idx]);
+}
+
 static struct sba_request *sba_alloc_request(struct sba_device *sba)
 {
 	unsigned long flags;
@@ -211,8 +219,17 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba)
 	if (req)
 		list_move_tail(&req->node, &sba->reqs_alloc_list);
 	spin_unlock_irqrestore(&sba->reqs_lock, flags);
-	if (!req)
+
+	if (!req) {
+		/*
+		 * We have no more free requests, so peek the
+		 * mailbox channels in the hope that a few active
+		 * requests have completed, creating more room
+		 * for new requests.
+		 */
+		sba_peek_mchans(sba);
 		return NULL;
+	}
 
 	req->flags = SBA_REQUEST_STATE_ALLOCED;
 	req->first = req;
@@ -560,17 +577,15 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan,
 					 dma_cookie_t cookie,
 					 struct dma_tx_state *txstate)
 {
-	int mchan_idx;
 	enum dma_status ret;
 	struct sba_device *sba = to_sba_device(dchan);
 
-	for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
-		mbox_client_peek_data(sba->mchans[mchan_idx]);
-
 	ret = dma_cookie_status(dchan, cookie, txstate);
 	if (ret == DMA_COMPLETE)
 		return ret;
 
+	sba_peek_mchans(sba);
+
 	return dma_cookie_status(dchan, cookie, txstate);
 }
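The sba_tx_status() hunk also reorders the peek so it runs only when the cookie is not already complete. A hedged sketch of a polling consumer that benefits from this: demo_wait_for_cookie() is hypothetical, while dmaengine_tx_status() is the standard dmaengine helper from <linux/dmaengine.h>.

#include <linux/dmaengine.h>
#include <linux/sched.h>

/*
 * Hypothetical busy-poll helper: each dmaengine_tx_status() call ends
 * up in the driver's sba_tx_status(), which (after this patch) peeks
 * the mailbox channels only when the descriptor is not yet complete,
 * so stalled completions are processed without waiting for the mbox
 * completion worker to run.
 */
static int demo_wait_for_cookie(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;

	for (;;) {
		status = dmaengine_tx_status(chan, cookie, NULL);
		if (status == DMA_COMPLETE)
			return 0;
		if (status == DMA_ERROR)
			return -EIO;
		cpu_relax();
	}
}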