Diffstat (limited to 'drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c')
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c | 152
1 file changed, 123 insertions(+), 29 deletions(-)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 8f8b9373de95..456944a6a2db 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -24,13 +24,13 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "debug.h"
#include "proto.h"
#include "msgbuf.h"
#include "commonring.h"
#include "flowring.h"
-#include "dhd_bus.h"
+#include "bus.h"
#include "tracepoint.h"
@@ -208,6 +208,14 @@ struct msgbuf_flowring_flush_resp {
__le32 rsvd0[3];
};
+struct brcmf_msgbuf_work_item {
+ struct list_head queue;
+ u32 flowid;
+ int ifidx;
+ u8 sa[ETH_ALEN];
+ u8 da[ETH_ALEN];
+};
+
struct brcmf_msgbuf {
struct brcmf_pub *drvr;
@@ -230,7 +238,7 @@ struct brcmf_msgbuf {
dma_addr_t ioctbuf_handle;
u32 ioctbuf_phys_hi;
u32 ioctbuf_phys_lo;
- u32 ioctl_resp_status;
+ int ioctl_resp_status;
u32 ioctl_resp_ret_len;
u32 ioctl_resp_pktid;
@@ -248,6 +256,10 @@ struct brcmf_msgbuf {
struct work_struct txflow_work;
unsigned long *flow_map;
unsigned long *txstatus_done_map;
+
+ struct work_struct flowring_work;
+ spinlock_t flowring_work_lock;
+ struct list_head work_queue;
};
struct brcmf_msgbuf_pktid {
@@ -284,11 +296,11 @@ brcmf_msgbuf_init_pktids(u32 nr_array_entries,
struct brcmf_msgbuf_pktid *array;
struct brcmf_msgbuf_pktids *pktids;
- array = kcalloc(nr_array_entries, sizeof(*array), GFP_ATOMIC);
+ array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL);
if (!array)
return NULL;
- pktids = kzalloc(sizeof(*pktids), GFP_ATOMIC);
+ pktids = kzalloc(sizeof(*pktids), GFP_KERNEL);
if (!pktids) {
kfree(array);
return NULL;
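
All of the GFP_ATOMIC to GFP_KERNEL conversions in this patch sit on paths that run in process context (protocol attach and, with this patch, the flow-ring worker), where a sleeping allocation is allowed and less likely to fail under memory pressure. A minimal sketch of the rule of thumb, using a hypothetical helper that is not part of the driver:

/* Hypothetical helper: pick the allocation flag from the calling
 * context. GFP_KERNEL may sleep, so it is only valid where sleeping is
 * allowed (process context, no spinlocks held); GFP_ATOMIC never
 * sleeps but draws on reserves and fails more easily.
 */
static void *brcmf_example_alloc(size_t len, bool may_sleep)
{
	return kzalloc(len, may_sleep ? GFP_KERNEL : GFP_ATOMIC);
}
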
@@ -506,8 +518,7 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
len : msgbuf->ioctl_resp_ret_len);
}
- if (skb)
- brcmu_pkt_buf_free_skb(skb);
+ brcmu_pkt_buf_free_skb(skb);
return msgbuf->ioctl_resp_status;
}
@@ -544,11 +555,29 @@ brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
}
-static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
- struct sk_buff *skb)
+static struct brcmf_msgbuf_work_item *
+brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf)
+{
+ struct brcmf_msgbuf_work_item *work = NULL;
+ ulong flags;
+
+ spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
+ if (!list_empty(&msgbuf->work_queue)) {
+ work = list_first_entry(&msgbuf->work_queue,
+ struct brcmf_msgbuf_work_item, queue);
+ list_del(&work->queue);
+ }
+ spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
+
+ return work;
+}
+
+
+static u32
+brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
+ struct brcmf_msgbuf_work_item *work)
{
struct msgbuf_tx_flowring_create_req *create;
- struct ethhdr *eh = (struct ethhdr *)(skb->data);
struct brcmf_commonring *commonring;
void *ret_ptr;
u32 flowid;
@@ -557,16 +586,11 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
long long address;
int err;
- flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
- skb->priority, ifidx);
- if (flowid == BRCMF_FLOWRING_INVALID_ID)
- return flowid;
-
+ flowid = work->flowid;
dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
-
dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
&msgbuf->flowring_dma_handle[flowid],
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!dma_buf) {
brcmf_err("dma_alloc_coherent failed\n");
brcmf_flowring_delete(msgbuf->flow, flowid);
@@ -589,13 +613,13 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
- create->msg.ifidx = ifidx;
+ create->msg.ifidx = work->ifidx;
create->msg.request_id = 0;
create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
create->flow_ring_id = cpu_to_le16(flowid +
BRCMF_NROF_H2D_COMMON_MSGRINGS);
- memcpy(create->sa, eh->h_source, ETH_ALEN);
- memcpy(create->da, eh->h_dest, ETH_ALEN);
+ memcpy(create->sa, work->sa, ETH_ALEN);
+ memcpy(create->da, work->da, ETH_ALEN);
address = (long long)(long)msgbuf->flowring_dma_handle[flowid];
create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
@@ -603,7 +627,7 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);
brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
- flowid, eh->h_dest, create->tid, ifidx);
+ flowid, work->da, create->tid, work->ifidx);
err = brcmf_commonring_write_complete(commonring);
brcmf_commonring_unlock(commonring);
@@ -617,6 +641,53 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
}
+static void brcmf_msgbuf_flowring_worker(struct work_struct *work)
+{
+ struct brcmf_msgbuf *msgbuf;
+ struct brcmf_msgbuf_work_item *create;
+
+ msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work);
+
+ while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) {
+ brcmf_msgbuf_flowring_create_worker(msgbuf, create);
+ kfree(create);
+ }
+}
+
+
+static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
+ struct sk_buff *skb)
+{
+ struct brcmf_msgbuf_work_item *create;
+ struct ethhdr *eh = (struct ethhdr *)(skb->data);
+ u32 flowid;
+ ulong flags;
+
+ create = kzalloc(sizeof(*create), GFP_ATOMIC);
+ if (create == NULL)
+ return BRCMF_FLOWRING_INVALID_ID;
+
+ flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
+ skb->priority, ifidx);
+ if (flowid == BRCMF_FLOWRING_INVALID_ID) {
+ kfree(create);
+ return flowid;
+ }
+
+ create->flowid = flowid;
+ create->ifidx = ifidx;
+ memcpy(create->sa, eh->h_source, ETH_ALEN);
+ memcpy(create->da, eh->h_dest, ETH_ALEN);
+
+ spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
+ list_add_tail(&create->queue, &msgbuf->work_queue);
+ spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
+ schedule_work(&msgbuf->flowring_work);
+
+ return flowid;
+}
+
+
static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
{
struct brcmf_flowring *flow = msgbuf->flow;
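
Taken together, the hunks above split flow-ring creation in two: the transmit path, which may run in atomic context, only allocates a small work item with GFP_ATOMIC and queues it, while the DMA ring itself is allocated later by the worker in process context, where GFP_KERNEL and a sleeping dma_alloc_coherent() are safe. A condensed sketch of that hand-off, excerpted and simplified from the code above (error handling omitted):

/* Producer side (atomic context allowed): record the request and kick
 * the workqueue.
 */
spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
list_add_tail(&create->queue, &msgbuf->work_queue);
spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
schedule_work(&msgbuf->flowring_work);

/* Worker side (process context): drain the queue one item at a time;
 * each item results in one GFP_KERNEL DMA allocation and one
 * MSGBUF_TYPE_FLOW_RING_CREATE request to the firmware.
 */
while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) {
	brcmf_msgbuf_flowring_create_worker(msgbuf, create);
	kfree(create);
}
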
@@ -767,7 +838,8 @@ brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;
- msgbuf->ioctl_resp_status = le16_to_cpu(ioctl_resp->compl_hdr.status);
+ msgbuf->ioctl_resp_status =
+ (s16)le16_to_cpu(ioctl_resp->compl_hdr.status);
msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);
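
The completion status reported by the firmware is a 16-bit value that can be negative, so the added (s16) cast sign-extends it before it is stored in the now-signed ioctl_resp_status. A small standalone illustration of why the cast matters (illustrative values only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t wire_status = 0xffff;         /* firmware reporting -1 */
	uint32_t old_status = wire_status;     /* stored unsigned: 65535 */
	int new_status = (int16_t)wire_status; /* sign-extended: -1 */

	printf("unsigned: %u, sign-extended: %d\n", old_status, new_status);
	return 0;
}
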
@@ -1008,8 +1080,17 @@ brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb,
{
struct brcmf_if *ifp;
+ /* The ifidx is the idx to map to the matching netdev/ifp. When receiving
+ * events this is easy because it contains the bssidx, which maps
+ * 1-on-1 to the netdev/ifp. But for data frames only the ifidx is
+ * received. bssidx 1 is used for p2p0 and no data can be received or
+ * transmitted on it. Therefore bssidx is ifidx + 1 if ifidx > 0.
+ */
+ if (ifidx)
+ (ifidx)++;
ifp = msgbuf->drvr->iflist[ifidx];
if (!ifp || !ifp->ndev) {
+ brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
brcmu_pkt_buf_free_skb(skb);
return;
}
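
The comment above boils down to a simple mapping: for data frames, ifidx 0 refers to the primary interface, and every non-zero ifidx is shifted up by one because bssidx 1 belongs to p2p0, which never carries data. A hypothetical helper expressing that mapping (not part of the patch):

/* Hypothetical helper mirroring the comment above: translate the ifidx
 * carried in a received data frame into the driver's bssidx-based
 * iflist index.
 */
static inline int brcmf_msgbuf_rx_ifidx_to_idx(int ifidx)
{
	return ifidx ? ifidx + 1 : 0;
}
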
@@ -1271,7 +1352,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
u32 count;
if_msgbuf = drvr->bus_if->msgbuf;
- msgbuf = kzalloc(sizeof(*msgbuf), GFP_ATOMIC);
+ msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
if (!msgbuf)
goto fail;
@@ -1282,11 +1363,12 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
}
INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings);
- msgbuf->flow_map = kzalloc(count, GFP_ATOMIC);
+ count = count * sizeof(unsigned long);
+ msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
if (!msgbuf->flow_map)
goto fail;
- msgbuf->txstatus_done_map = kzalloc(count, GFP_ATOMIC);
+ msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL);
if (!msgbuf->txstatus_done_map)
goto fail;
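
BITS_TO_LONGS() returns a count of unsigned longs, not bytes, so before the added multiplication the two bitmaps were under-allocated by a factor of sizeof(unsigned long). An equivalent, arguably clearer way to size the same allocation, shown only as a sketch (the patch keeps the explicit multiplication):

/* Sketch: let kcalloc() do the element-count * element-size math for
 * the flow bitmaps instead of computing the byte count by hand.
 */
msgbuf->flow_map = kcalloc(BITS_TO_LONGS(if_msgbuf->nrof_flowrings),
			   sizeof(unsigned long), GFP_KERNEL);
if (!msgbuf->flow_map)
	goto fail;
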
@@ -1294,7 +1376,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev,
BRCMF_TX_IOCTL_MAX_MSG_SIZE,
&msgbuf->ioctbuf_handle,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!msgbuf->ioctbuf)
goto fail;
address = (long long)(long)msgbuf->ioctbuf_handle;
@@ -1317,7 +1399,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
msgbuf->nrof_flowrings = if_msgbuf->nrof_flowrings;
msgbuf->flowring_dma_handle = kzalloc(msgbuf->nrof_flowrings *
- sizeof(*msgbuf->flowring_dma_handle), GFP_ATOMIC);
+ sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
if (!msgbuf->flowring_dma_handle)
goto fail;
@@ -1357,6 +1439,10 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
brcmf_msgbuf_rxbuf_event_post(msgbuf);
brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
+ INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker);
+ spin_lock_init(&msgbuf->flowring_work_lock);
+ INIT_LIST_HEAD(&msgbuf->work_queue);
+
return 0;
fail:
@@ -1379,11 +1465,19 @@ fail:
void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
{
struct brcmf_msgbuf *msgbuf;
+ struct brcmf_msgbuf_work_item *work;
brcmf_dbg(TRACE, "Enter\n");
if (drvr->proto->pd) {
msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
-
+ cancel_work_sync(&msgbuf->flowring_work);
+ while (!list_empty(&msgbuf->work_queue)) {
+ work = list_first_entry(&msgbuf->work_queue,
+ struct brcmf_msgbuf_work_item,
+ queue);
+ list_del(&work->queue);
+ kfree(work);
+ }
kfree(msgbuf->flow_map);
kfree(msgbuf->txstatus_done_map);
if (msgbuf->txflow_wq)