path: root/drivers/net/wireless/ath/ath10k/htc.c
author    Wen Gong <wgong@codeaurora.org>  2020-04-21 15:09:35 +0300
committer Kalle Valo <kvalo@codeaurora.org>  2020-04-22 09:43:29 +0300
commit    c8334512f3dd1b94844baca629f9bedca4271593 (patch)
tree      1580bbc910b49d6b2aa967b8e7d61279d92414bd /drivers/net/wireless/ath/ath10k/htc.c
parent    d81709346ceac7b9131b4a091197b9d7fc5a409f (diff)
download  linux-c8334512f3dd1b94844baca629f9bedca4271593.tar.xz
ath10k: add htt TX bundle for sdio
The transmission utilization ratio of the SDIO bus is poor for small packets, because the per-transfer space and time overhead on the SDIO bus is the same for large and small packets. Throughput for large packets is therefore much higher than for small ones.

Test results for different data lengths:

data packet (bytes)   cost time (us)   calculated rate (Mbps)
        256                 28                  73
        512                 33                 124
       1024                 35                 234
       1792                 45                 318
      14336                168                 682
      28672                333                 688
      57344                660                 695

This patch changes the TX path from sending single packets to sending large bundled packets of up to 32 messages, which results in a significant performance improvement on the TX path.

A fourth thread, "ath10k_tx_complete_wq", is also added to ath10k, as it improves TCP RX throughput (values in Mbps):

                                   TCP-RX  TCP-TX  UDP-RX  UDP-TX
use workqueue_tx_complete            423     357     448     412
change it to ar->workqueue           410     360     449     414
change it to ar->workqueue_aux       405     339     446     401

This patch only affects SDIO chips; it does not affect PCI, SNOC, etc., since bundling is enabled only for SDIO.

Tested with QCA6174 SDIO with firmware WLAN.RMH.4.4.1-00017-QCARMSWP-1.

Signed-off-by: Wen Gong <wgong@codeaurora.org>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/20200410061400.14231-2-wgong@codeaurora.org
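For context, the credit accounting this patch factors out into ath10k_htc_consume_credit()/ath10k_htc_release_credit(), and the per-record padding used when packing a bundle, reduce to rounding a transfer length up to whole credits. Below is a minimal standalone sketch of that arithmetic; DIV_ROUND_UP mirrors the kernel macro of the same name, and the credit size of 1792 and the sample lengths are illustrative assumptions, not values taken from the driver.

/* Standalone illustration of the HTC TX credit math used by this
 * patch. DIV_ROUND_UP mirrors the kernel macro; the credit size
 * and sample lengths below are assumed values for demonstration. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int tx_credit_size = 1792;	/* assumed credit size */
	unsigned int lens[] = { 256, 1792, 1800 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int len = lens[i];
		/* credits consumed, as in ath10k_htc_consume_credit() */
		unsigned int credits = DIV_ROUND_UP(len, tx_credit_size);
		/* pad up to a credit boundary, as in the bundling loop */
		unsigned int rem = len % tx_credit_size;
		unsigned int pad = rem ? tx_credit_size - rem : 0;

		printf("len %u -> credits %u pad %u\n", len, credits, pad);
	}
	return 0;
}

Under these assumptions, a 1800-byte transfer prints "len 1800 -> credits 2 pad 1784": a packet just over one credit consumes two credits and is padded out to the next boundary, which is why bundling many small messages into one transfer improves bus utilization.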
Diffstat (limited to 'drivers/net/wireless/ath/ath10k/htc.c')
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.c | 372
1 file changed, 342 insertions(+), 30 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 61ee413d902a..ed4e0add997e 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -51,10 +51,12 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
struct ath10k *ar = ep->htc->ar;
+ struct ath10k_htc_hdr *hdr;
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
ep->eid, skb);
+ hdr = (struct ath10k_htc_hdr *)skb->data;
ath10k_htc_restore_tx_skb(ep->htc, skb);
if (!ep->ep_ops.ep_tx_complete) {
@@ -63,6 +65,11 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
return;
}
+ if (hdr->flags & ATH10K_HTC_FLAG_SEND_BUNDLE) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);
@@ -78,7 +85,7 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0;
- if (ep->tx_credit_flow_enabled)
+ if (ep->tx_credit_flow_enabled && !ep->bundle_tx)
hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
spin_lock_bh(&ep->htc->tx_lock);
@@ -86,6 +93,63 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
spin_unlock_bh(&ep->htc->tx_lock);
}
+static int ath10k_htc_consume_credit(struct ath10k_htc_ep *ep,
+ unsigned int len,
+ bool consume)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ enum ath10k_htc_ep_id eid = ep->eid;
+ int credits, ret = 0;
+
+ if (!ep->tx_credit_flow_enabled)
+ return 0;
+
+ credits = DIV_ROUND_UP(len, ep->tx_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+
+ if (ep->tx_credits < credits) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc insufficient credits ep %d required %d available %d consume %d\n",
+ eid, credits, ep->tx_credits, consume);
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ if (consume) {
+ ep->tx_credits -= credits;
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc ep %d consumed %d credits total %d\n",
+ eid, credits, ep->tx_credits);
+ }
+
+unlock:
+ spin_unlock_bh(&htc->tx_lock);
+ return ret;
+}
+
+static void ath10k_htc_release_credit(struct ath10k_htc_ep *ep, unsigned int len)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ enum ath10k_htc_ep_id eid = ep->eid;
+ int credits;
+
+ if (!ep->tx_credit_flow_enabled)
+ return;
+
+ credits = DIV_ROUND_UP(len, ep->tx_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc ep %d reverted %d credits back total %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ar);
+}
+
int ath10k_htc_send(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
@@ -95,8 +159,8 @@ int ath10k_htc_send(struct ath10k_htc *htc,
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct ath10k_hif_sg_item sg_item;
struct device *dev = htc->ar->dev;
- int credits = 0;
int ret;
+ unsigned int skb_len;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
@@ -108,23 +172,10 @@ int ath10k_htc_send(struct ath10k_htc *htc,
skb_push(skb, sizeof(struct ath10k_htc_hdr));
- if (ep->tx_credit_flow_enabled) {
- credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
- spin_lock_bh(&htc->tx_lock);
- if (ep->tx_credits < credits) {
- ath10k_dbg(ar, ATH10K_DBG_HTC,
- "htc insufficient credits ep %d required %d available %d\n",
- eid, credits, ep->tx_credits);
- spin_unlock_bh(&htc->tx_lock);
- ret = -EAGAIN;
- goto err_pull;
- }
- ep->tx_credits -= credits;
- ath10k_dbg(ar, ATH10K_DBG_HTC,
- "htc ep %d consumed %d credits (total %d)\n",
- eid, credits, ep->tx_credits);
- spin_unlock_bh(&htc->tx_lock);
- }
+ skb_len = skb->len;
+ ret = ath10k_htc_consume_credit(ep, skb_len, true);
+ if (ret)
+ goto err_pull;
ath10k_htc_prepare_tx_skb(ep, skb);
@@ -155,17 +206,7 @@ err_unmap:
if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
- if (ep->tx_credit_flow_enabled) {
- spin_lock_bh(&htc->tx_lock);
- ep->tx_credits += credits;
- ath10k_dbg(ar, ATH10K_DBG_HTC,
- "htc ep %d reverted %d credits back (total %d)\n",
- eid, credits, ep->tx_credits);
- spin_unlock_bh(&htc->tx_lock);
-
- if (ep->ep_ops.ep_tx_credits)
- ep->ep_ops.ep_tx_credits(htc->ar);
- }
+ ath10k_htc_release_credit(ep, skb_len);
err_pull:
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
return ret;
@@ -581,6 +622,273 @@ static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
return allocation;
}
+static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
+ struct sk_buff *bundle_skb,
+ struct sk_buff_head *tx_save_head)
+{
+ struct ath10k_hif_sg_item sg_item;
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ struct sk_buff *skb;
+ int ret, cn = 0;
+ unsigned int skb_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle skb len %d\n", bundle_skb->len);
+ skb_len = bundle_skb->len;
+ ret = ath10k_htc_consume_credit(ep, skb_len, true);
+
+ if (!ret) {
+ sg_item.transfer_id = ep->eid;
+ sg_item.transfer_context = bundle_skb;
+ sg_item.vaddr = bundle_skb->data;
+ sg_item.len = bundle_skb->len;
+
+ ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
+ if (ret)
+ ath10k_htc_release_credit(ep, skb_len);
+ }
+
+ if (ret)
+ dev_kfree_skb_any(bundle_skb);
+
+ for (cn = 0; (skb = skb_dequeue_tail(tx_save_head)); cn++) {
+ if (ret) {
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ skb_queue_head(&ep->tx_req_head, skb);
+ } else {
+ skb_queue_tail(&ep->tx_complete_head, skb);
+ }
+ }
+
+ if (!ret)
+ queue_work(ar->workqueue_tx_complete, &ar->tx_complete_work);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "bundle tx status %d eid %d req count %d count %d len %d\n",
+ ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len);
+ return ret;
+}
+
+static void ath10k_htc_send_one_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ int ret;
+
+ ret = ath10k_htc_send(htc, ep->eid, skb);
+
+ if (ret)
+ skb_queue_head(&ep->tx_req_head, skb);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "tx one status %d eid %d len %d pending count %d\n",
+ ret, ep->eid, skb->len, skb_queue_len(&ep->tx_req_head));
+}
+
+static int ath10k_htc_send_bundle_skbs(struct ath10k_htc_ep *ep)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct sk_buff *bundle_skb, *skb;
+ struct sk_buff_head tx_save_head;
+ struct ath10k_htc_hdr *hdr;
+ u8 *bundle_buf;
+ int ret = 0, credit_pad, credit_remainder, trans_len, bundles_left = 0;
+
+ if (htc->ar->state == ATH10K_STATE_WEDGED)
+ return -ECOMM;
+
+ if (ep->tx_credit_flow_enabled &&
+ ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE)
+ return 0;
+
+ bundles_left = ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
+ bundle_skb = dev_alloc_skb(bundles_left);
+
+ if (!bundle_skb)
+ return -ENOMEM;
+
+ bundle_buf = bundle_skb->data;
+ skb_queue_head_init(&tx_save_head);
+
+ while (true) {
+ skb = skb_dequeue(&ep->tx_req_head);
+ if (!skb)
+ break;
+
+ credit_pad = 0;
+ trans_len = skb->len + sizeof(*hdr);
+ credit_remainder = trans_len % ep->tx_credit_size;
+
+ if (credit_remainder != 0) {
+ credit_pad = ep->tx_credit_size - credit_remainder;
+ trans_len += credit_pad;
+ }
+
+ ret = ath10k_htc_consume_credit(ep,
+ bundle_buf + trans_len - bundle_skb->data,
+ false);
+ if (ret) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ break;
+ }
+
+ if (bundles_left < trans_len) {
+ bundle_skb->len = bundle_buf - bundle_skb->data;
+ ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
+
+ if (ret) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ return ret;
+ }
+
+ if (skb_queue_len(&ep->tx_req_head) == 0) {
+ ath10k_htc_send_one_skb(ep, skb);
+ return ret;
+ }
+
+ if (ep->tx_credit_flow_enabled &&
+ ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ return 0;
+ }
+
+ bundles_left =
+ ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
+ bundle_skb = dev_alloc_skb(bundles_left);
+
+ if (!bundle_skb) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ return -ENOMEM;
+ }
+ bundle_buf = bundle_skb->data;
+ skb_queue_head_init(&tx_save_head);
+ }
+
+ skb_push(skb, sizeof(struct ath10k_htc_hdr));
+ ath10k_htc_prepare_tx_skb(ep, skb);
+
+ memcpy(bundle_buf, skb->data, skb->len);
+ hdr = (struct ath10k_htc_hdr *)bundle_buf;
+ hdr->flags |= ATH10K_HTC_FLAG_SEND_BUNDLE;
+ hdr->pad_len = __cpu_to_le16(credit_pad);
+ bundle_buf += trans_len;
+ bundles_left -= trans_len;
+ skb_queue_tail(&tx_save_head, skb);
+ }
+
+ if (bundle_buf != bundle_skb->data) {
+ bundle_skb->len = bundle_buf - bundle_skb->data;
+ ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
+ } else {
+ dev_kfree_skb_any(bundle_skb);
+ }
+
+ return ret;
+}
+
+static void ath10k_htc_bundle_tx_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work);
+ struct ath10k_htc_ep *ep;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
+ ep = &ar->htc.endpoint[i];
+
+ if (!ep->bundle_tx)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx work eid %d count %d\n",
+ ep->eid, skb_queue_len(&ep->tx_req_head));
+
+ if (skb_queue_len(&ep->tx_req_head) >=
+ ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE) {
+ ath10k_htc_send_bundle_skbs(ep);
+ } else {
+ skb = skb_dequeue(&ep->tx_req_head);
+
+ if (!skb)
+ continue;
+ ath10k_htc_send_one_skb(ep, skb);
+ }
+ }
+}
+
+static void ath10k_htc_tx_complete_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work);
+ struct ath10k_htc_ep *ep;
+ enum ath10k_htc_ep_id eid;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
+ ep = &ar->htc.endpoint[i];
+ eid = ep->eid;
+ if (ep->bundle_tx && eid == ar->htt.eid) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx complete eid %d pending complete count %d\n",
+ ep->eid, skb_queue_len(&ep->tx_complete_head));
+
+ while (true) {
+ skb = skb_dequeue(&ep->tx_complete_head);
+ if (!skb)
+ break;
+ ath10k_htc_notify_tx_completion(ep, skb);
+ }
+ }
+ }
+}
+
+int ath10k_htc_send_hl(struct ath10k_htc *htc,
+ enum ath10k_htc_ep_id eid,
+ struct sk_buff *skb)
+{
+ struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath10k *ar = htc->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc send hl eid %d bundle %d tx count %d len %d\n",
+ eid, ep->bundle_tx, skb_queue_len(&ep->tx_req_head), skb->len);
+
+ if (ep->bundle_tx) {
+ skb_queue_tail(&ep->tx_req_head, skb);
+ queue_work(ar->workqueue, &ar->bundle_tx_work);
+ return 0;
+ } else {
+ return ath10k_htc_send(htc, eid, skb);
+ }
+}
+
+void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep)
+{
+ if (ep->htc->max_msgs_per_htc_bundle >= ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE &&
+ !ep->bundle_tx) {
+ ep->bundle_tx = true;
+ skb_queue_head_init(&ep->tx_req_head);
+ skb_queue_head_init(&ep->tx_complete_head);
+ }
+}
+
+void ath10k_htc_stop_hl(struct ath10k *ar)
+{
+ struct ath10k_htc_ep *ep;
+ int i;
+
+ cancel_work_sync(&ar->bundle_tx_work);
+ cancel_work_sync(&ar->tx_complete_work);
+
+ for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
+ ep = &ar->htc.endpoint[i];
+
+ if (!ep->bundle_tx)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "stop tx work eid %d count %d\n",
+ ep->eid, skb_queue_len(&ep->tx_req_head));
+
+ skb_queue_purge(&ep->tx_req_head);
+ }
+}
+
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
@@ -657,6 +965,9 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
htc->max_msgs_per_htc_bundle);
}
+ INIT_WORK(&ar->bundle_tx_work, ath10k_htc_bundle_tx_work);
+ INIT_WORK(&ar->tx_complete_work, ath10k_htc_tx_complete_work);
+
return 0;
}
@@ -801,6 +1112,7 @@ setup:
ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
ep->tx_credits = tx_alloc;
+ ep->tx_credit_size = htc->target_credit_size;
/* copy all the callbacks */
ep->ep_ops = conn_req->ep_ops;