path: root/drivers/net/wwan/iosm/iosm_ipc_task_queue.c
author	David S. Miller <davem@davemloft.net>	2021-06-13 23:49:39 +0300
committer	David S. Miller <davem@davemloft.net>	2021-06-13 23:49:39 +0300
commit	a212d9f33ed0b8399bd9829a779c4024068742a2 (patch)
tree	d5c840d5e5b3b958ba13bdd7544c4cf80cf53e88 /drivers/net/wwan/iosm/iosm_ipc_task_queue.c
parent	ffbbc5e5c7174dc4732f78b9577a19621079c879 (diff)
parent	f7af616c632ee2ac3af0876fe33bf9e0232e665a (diff)
download	linux-a212d9f33ed0b8399bd9829a779c4024068742a2.tar.xz
Merge branch 'iosm-driver'
M Chetan Kumar says:

====================
net: iosm: PCIe Driver for Intel M.2 Modem

The IOSM (IPC over Shared Memory) driver is a PCIe host driver implemented for Linux and Chrome platforms for data exchange over the PCIe interface between the host platform and an Intel M.2 modem. The driver exposes an interface conforming to the MBIM protocol, so any front-end application (e.g. ModemManager) can manage the MBIM interface to enable data communication towards WWAN.

The Intel M.2 modem uses two BAR regions. The first region is dedicated to doorbell registers for IRQs, and the second region is used as a scratchpad area for bookkeeping of modem execution-stage details along with the host system's shared memory region context details.

The upper edge of the driver exposes the control and data channels for user-space application interaction. At the lower edge, these data and control channels are associated with pipes. The pipes are the lowest-level interfaces used over PCIe as logical channels for message exchange. A single channel maps to a UL and a DL pipe, which are initialized on device open.

On the UL path, the driver copies application data into SKBs, associates them with transfer descriptors, and places them on the ring buffer for DMA transfer. Once the information has been updated in the shared memory region, the host rings a doorbell to the modem to perform the DMA, and the modem uses an MSI to signal back to the host. For receiving data on the DL path, SKBs are pre-allocated during pipe open and their transfer descriptors are given to the modem for DMA transfer.

The driver exposes two types of ports: "wwan0mbim0", a char device node used for MBIM control operations, and "wwan0-x" (x = 0,1,2..7), network interfaces for IP data communication.

1) MBIM Control Interface: This node exposes an interface between the modem and an application, using the char device exposed by the IOSM driver to establish and manage MBIM data communication with PCIe-based Intel M.2 modems.

2) MBIM Data Interface: The IOSM driver exposes IP link interfaces "wwan0-x" of type "wwan" for IP traffic. The iproute network utility is used for creating a "wwan0-x" network interface and for associating it with an MBIM IP session. The driver supports up to eight IP sessions for simultaneous IP communication.

This applies on top of the WWAN core rtnetlink series posted here:
https://lore.kernel.org/netdev/1623486057-13075-1-git-send-email-loic.poulain@linaro.org/

The driver has also been compiled and tested on top of the netdev net-next tree:
https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git/
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
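[Editor's note, not part of this series: for readers unfamiliar with MBIM control ports, below is a minimal user-space sketch of opening the control channel, assuming the char device appears as /dev/wwan0mbim0 and using the MBIM_OPEN_MSG framing from the MBIM 1.0 specification (MessageType 0x00000001, a 16-byte little-endian header carrying a transaction id and the maximum control transfer size). A real application would normally go through ModemManager or libmbim instead.]

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* MBIM_OPEN_MSG header per the MBIM 1.0 spec. Fields are little-endian;
 * this sketch assumes a little-endian host.
 */
struct mbim_open_msg {
	uint32_t message_type;
	uint32_t message_length;
	uint32_t transaction_id;
	uint32_t max_control_transfer;
};

int main(void)
{
	struct mbim_open_msg msg = {
		.message_type = 0x00000001,
		.message_length = sizeof(msg),
		.transaction_id = 1,
		.max_control_transfer = 4096,
	};
	int fd = open("/dev/wwan0mbim0", O_RDWR); /* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, &msg, sizeof(msg)) < 0)
		perror("write");
	close(fd);
	return 0;
}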
Diffstat (limited to 'drivers/net/wwan/iosm/iosm_ipc_task_queue.c')
-rw-r--r--	drivers/net/wwan/iosm/iosm_ipc_task_queue.c	202
1 file changed, 202 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wwan/iosm/iosm_ipc_task_queue.c b/drivers/net/wwan/iosm/iosm_ipc_task_queue.c
new file mode 100644
index 000000000000..852a99166144
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_task_queue.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_task_queue.h"
+
+/* Actual tasklet function, called whenever the tasklet is scheduled.
+ * Invokes the event handler callback for each element in the message queue.
+ */
+static void ipc_task_queue_handler(unsigned long data)
+{
+ struct ipc_task_queue *ipc_task = (struct ipc_task_queue *)data;
+ unsigned int q_rpos = ipc_task->q_rpos;
+
+ /* Loop over the input queue contents. */
+ while (q_rpos != ipc_task->q_wpos) {
+ /* Get the current first queue element. */
+ struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];
+
+ /* Process the input message. */
+ if (args->func)
+ args->response = args->func(args->ipc_imem, args->arg,
+ args->msg, args->size);
+
+ /* Signal completion for synchronous calls */
+ if (args->completion)
+ complete(args->completion);
+
+ /* Free message if copy was allocated. */
+ if (args->is_copy)
+ kfree(args->msg);
+
+ /* Reset the queue element to an invalid state. Technically,
+ * spin_lock_irqsave is not required here, as the array element
+ * has already been processed, so we can assume the queue will
+ * not wrap around to this same element again within such a
+ * short time.
+ */
+ args->completion = NULL;
+ args->func = NULL;
+ args->msg = NULL;
+ args->size = 0;
+ args->is_copy = false;
+
+ /* Advance the local read pointer and publish it back to the
+ * shared read pointer.
+ */
+ q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
+ ipc_task->q_rpos = q_rpos;
+ }
+}
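
[Editor's illustration, not part of the patch: the queue above is a classic single-producer/single-consumer ring in which the tasklet owns q_rpos and the senders own q_wpos. With IPC_THREAD_QUEUE_SIZE slots it can hold at most IPC_THREAD_QUEUE_SIZE - 1 entries, because next(wpos) == rpos is the full condition. A standalone sketch of the index arithmetic:]

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_SIZE 8 /* stand-in for IPC_THREAD_QUEUE_SIZE */

static unsigned int rpos, wpos;

static bool queue_full(void)
{
	return (wpos + 1) % QUEUE_SIZE == rpos;
}

static bool queue_empty(void)
{
	return rpos == wpos;
}

int main(void)
{
	unsigned int n = 0;

	/* The writer advances wpos; one slot is sacrificed so that the
	 * full and empty states remain distinguishable.
	 */
	while (!queue_full()) {
		wpos = (wpos + 1) % QUEUE_SIZE;
		n++;
	}
	printf("capacity: %u of %u slots\n", n, QUEUE_SIZE); /* 7 of 8 */

	/* The reader drains by advancing rpos, as the handler does. */
	while (!queue_empty())
		rpos = (rpos + 1) % QUEUE_SIZE;
	return 0;
}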
+
+/* Free allocated message copies and complete any waiters left in the queue
+ * during deallocation.
+ */
+static void ipc_task_queue_cleanup(struct ipc_task_queue *ipc_task)
+{
+ unsigned int q_rpos = ipc_task->q_rpos;
+
+ while (q_rpos != ipc_task->q_wpos) {
+ struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];
+
+ if (args->completion)
+ complete(args->completion);
+
+ if (args->is_copy)
+ kfree(args->msg);
+
+ q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
+ ipc_task->q_rpos = q_rpos;
+ }
+}
+
+/* Add a message to the queue and trigger the ipc_task. */
+static int
+ipc_task_queue_add_task(struct iosm_imem *ipc_imem,
+ int arg, void *msg,
+ int (*func)(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size),
+ size_t size, bool is_copy, bool wait)
+{
+ struct tasklet_struct *ipc_tasklet = ipc_imem->ipc_task->ipc_tasklet;
+ struct ipc_task_queue *ipc_task = &ipc_imem->ipc_task->ipc_queue;
+ struct completion completion;
+ unsigned int pos, nextpos;
+ unsigned long flags;
+ int result = -EIO;
+
+ init_completion(&completion);
+
+ /* The task send path may be called from either interrupt or
+ * thread context, therefore protect the queue operation with a
+ * spinlock.
+ */
+ spin_lock_irqsave(&ipc_task->q_lock, flags);
+
+ pos = ipc_task->q_wpos;
+ nextpos = (pos + 1) % IPC_THREAD_QUEUE_SIZE;
+
+ /* Proceed only if there is a free slot in the queue. */
+ if (nextpos != ipc_task->q_rpos) {
+ /* Get the reference to the queue element and save the passed
+ * values.
+ */
+ ipc_task->args[pos].arg = arg;
+ ipc_task->args[pos].msg = msg;
+ ipc_task->args[pos].func = func;
+ ipc_task->args[pos].ipc_imem = ipc_imem;
+ ipc_task->args[pos].size = size;
+ ipc_task->args[pos].is_copy = is_copy;
+ ipc_task->args[pos].completion = wait ? &completion : NULL;
+ ipc_task->args[pos].response = -1;
+
+ /* Apply a write barrier so that the args[pos] fields are
+ * fully written before ipc_task->q_wpos is updated.
+ */
+ smp_wmb();
+
+ /* Publish the new write position. */
+ ipc_task->q_wpos = nextpos;
+ result = 0;
+ }
+
+ spin_unlock_irqrestore(&ipc_task->q_lock, flags);
+
+ if (result == 0) {
+ tasklet_schedule(ipc_tasklet);
+
+ if (wait) {
+ wait_for_completion(&completion);
+ result = ipc_task->args[pos].response;
+ }
+ } else {
+ dev_err(ipc_imem->ipc_task->dev, "queue is full");
+ }
+
+ return result;
+}
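
[Editor's note on the synchronous path above, not part of the patch: reading args[pos].response after wait_for_completion() is safe because the tasklet handler writes the response before calling complete(). A reduced, self-contained sketch of that completion pattern using only standard kernel primitives, with hypothetical demo_* names:]

#include <linux/completion.h>

struct demo_req {
	struct completion done;
	int response;
};

/* Producer side: block until the consumer has filled in the response. */
static int demo_send_and_wait(struct demo_req *req)
{
	init_completion(&req->done);
	/* ... enqueue req and kick the consumer here ... */
	wait_for_completion(&req->done);
	return req->response; /* ordered after complete() below */
}

/* Consumer side: publish the result, then wake the waiter. */
static void demo_finish(struct demo_req *req, int result)
{
	req->response = result;
	complete(&req->done);
}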
+
+int ipc_task_queue_send_task(struct iosm_imem *imem,
+ int (*func)(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size),
+ int arg, void *msg, size_t size, bool wait)
+{
+ bool is_copy = false;
+ void *copy = msg;
+ int ret = -ENOMEM;
+
+ if (size > 0) {
+ copy = kmemdup(msg, size, GFP_ATOMIC);
+ if (!copy)
+ goto out;
+
+ is_copy = true;
+ }
+
+ ret = ipc_task_queue_add_task(imem, arg, copy, func,
+ size, is_copy, wait);
+ if (ret < 0) {
+ dev_err(imem->ipc_task->dev,
+ "add task failed for %ps %d, %p, %zu, %d", func, arg,
+ copy, size, is_copy);
+ if (is_copy)
+ kfree(copy);
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
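
[Editor's illustration of a hypothetical caller, not part of the patch; the demo_* names and payload are invented, and the sketch assumes the driver's headers from this series:]

#include "iosm_ipc_imem.h"
#include "iosm_ipc_task_queue.h"

/* Runs in tasklet context; msg points at the kmemdup'd copy, which the
 * queue handler frees after this callback returns.
 */
static int demo_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
		   size_t size)
{
	/* ... safely touch shared-memory state here ... */
	return 0;
}

static int demo_post_work(struct iosm_imem *ipc_imem)
{
	u32 payload = 1; /* hypothetical payload */

	/* wait = true blocks until the tasklet has run demo_cb and
	 * returns its result; size > 0 makes the queue copy &payload.
	 */
	return ipc_task_queue_send_task(ipc_imem, demo_cb, 0,
					&payload, sizeof(payload), true);
}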
+
+int ipc_task_init(struct ipc_task *ipc_task)
+{
+ struct ipc_task_queue *ipc_queue = &ipc_task->ipc_queue;
+
+ ipc_task->ipc_tasklet = kzalloc(sizeof(*ipc_task->ipc_tasklet),
+ GFP_KERNEL);
+
+ if (!ipc_task->ipc_tasklet)
+ return -ENOMEM;
+
+ /* Initialize the spinlock needed to protect the message queue of the
+ * ipc_task
+ */
+ spin_lock_init(&ipc_queue->q_lock);
+
+ tasklet_init(ipc_task->ipc_tasklet, ipc_task_queue_handler,
+ (unsigned long)ipc_queue);
+ return 0;
+}
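
[Editor's aside, not part of the patch: this uses the legacy tasklet_init() signature that passes context as an unsigned long. Newer kernels prefer tasklet_setup()/from_tasklet(), which would require embedding the tasklet next to the queue so the container can be recovered. A sketch of that form, with hypothetical demo_* names:]

#include <linux/interrupt.h>

struct demo_task {
	struct tasklet_struct tasklet;
	struct ipc_task_queue queue;
};

static void demo_handler(struct tasklet_struct *t)
{
	struct demo_task *task = from_tasklet(task, t, tasklet);

	/* ... drain task->queue as ipc_task_queue_handler() does ... */
}

static void demo_init(struct demo_task *task)
{
	tasklet_setup(&task->tasklet, demo_handler);
}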
+
+void ipc_task_deinit(struct ipc_task *ipc_task)
+{
+ tasklet_kill(ipc_task->ipc_tasklet);
+
+ kfree(ipc_task->ipc_tasklet);
+ /* This will free/complete any outstanding messages,
+ * without calling the actual handler
+ */
+ ipc_task_queue_cleanup(&ipc_task->ipc_queue);
+}
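
[Editor's sketch of the lifecycle around the two functions above, not part of the patch; it assumes a struct ipc_task embedded in the driver's per-device state, with its dev field set before use, as the rest of the series does:]

#include "iosm_ipc_task_queue.h"

/* Hypothetical setup/teardown around ipc_task_init()/ipc_task_deinit(). */
static int demo_task_setup(struct ipc_task *ipc_task, struct device *dev)
{
	ipc_task->dev = dev; /* used by dev_err() in the queue paths */

	return ipc_task_init(ipc_task); /* -ENOMEM if tasklet alloc fails */
}

static void demo_task_teardown(struct ipc_task *ipc_task)
{
	/* Kills the tasklet first, then completes/frees queued work. */
	ipc_task_deinit(ipc_task);
}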