author		Stephan Gerhold <stephan@gerhold.net>	2021-06-18 20:36:11 +0300
committer	David S. Miller <davem@davemloft.net>	2021-06-18 23:13:40 +0300
commit		31c143f712750143abaca396236bbe8707700111 (patch)
tree		149cbe7b468334c730c302617fc331876e11e726 /drivers
parent		5e90abf49c2adfbd6954429c2a1aafdfe9fcab92 (diff)
net: wwan: Allow WWAN drivers to provide blocking tx and poll function
At the moment, the WWAN core provides wwan_port_txon/off() to implement
blocking writes. The tx() port operation should not block; instead,
wwan_port_txon/off() should be called when the TX queue is full or has
free space again.

However, in some cases it is not straightforward to make use of that
functionality. For example, the RPMSG API used by rpmsg_wwan_ctrl.c does
not provide any way to be notified when the TX queue has space again.
Instead, it only provides the following operations:

  - rpmsg_send(): blocking write (wait until there is space)
  - rpmsg_trysend(): non-blocking write (return error if no space)
  - rpmsg_poll(): set poll flags depending on TX queue state

Generally that is sufficient for implementing a char device, but it does
not fit well with the currently provided WWAN port ops.

Most of the time, using the non-blocking rpmsg_trysend() in the WWAN tx()
port operation works just fine. However, with high-frequency writes to
the char device it is possible to trigger a situation where this causes
issues. For example, consider the following (somewhat unrealistic)
example:

  # dd if=/dev/zero bs=1000 of=/dev/wwan0qmi0
  dd: error writing '/dev/wwan0qmi0': Resource temporarily unavailable
  1+0 records out

This fails immediately after writing the first record. It's likely only a
matter of time until this triggers issues for some real application
(e.g. ModemManager sending a lot of large QMI packets).

The rpmsg_char device does not have this problem, because it uses
rpmsg_trysend() and rpmsg_poll() to support non-blocking operations.

Make it possible to use the same in the RPMSG WWAN driver by adding two
new optional wwan_port_ops:

  - tx_blocking(): send data blocking if allowed
  - tx_poll(): set additional TX poll flags

This integrates nicely with the RPMSG API and does not require any change
in existing WWAN drivers.

With these changes, the dd example above blocks instead of exiting with
an error.

Cc: Loic Poulain <loic.poulain@linaro.org>
Signed-off-by: Stephan Gerhold <stephan@gerhold.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
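Note: the struct wwan_port_ops extension itself lives in the WWAN header
and is not visible in the drivers-only diffstat below. As a rough sketch
(not copied from the actual header), the two optional callbacks presumably
look roughly like this, with the signatures inferred from how they are
called in wwan_core.c further down:

	struct wwan_port_ops {
		int (*start)(struct wwan_port *port);
		void (*stop)(struct wwan_port *port);
		int (*tx)(struct wwan_port *port, struct sk_buff *skb);

		/* Optional: blocking tx(), may sleep until the TX queue has space */
		int (*tx_blocking)(struct wwan_port *port, struct sk_buff *skb);

		/* Optional: report additional TX poll flags for the char device */
		__poll_t (*tx_poll)(struct wwan_port *port, struct file *filp,
				    poll_table *wait);
	};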
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/wwan/rpmsg_wwan_ctrl.c	23
-rw-r--r--	drivers/net/wwan/wwan_core.c	16
2 files changed, 35 insertions, 4 deletions
diff --git a/drivers/net/wwan/rpmsg_wwan_ctrl.c b/drivers/net/wwan/rpmsg_wwan_ctrl.c
index de226cdb69fd..31c24420ab2e 100644
--- a/drivers/net/wwan/rpmsg_wwan_ctrl.c
+++ b/drivers/net/wwan/rpmsg_wwan_ctrl.c
@@ -67,10 +67,33 @@ static int rpmsg_wwan_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
return 0;
}
+static int rpmsg_wwan_ctrl_tx_blocking(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+ int ret;
+
+ ret = rpmsg_send(rpwwan->ept, skb->data, skb->len);
+ if (ret)
+ return ret;
+
+ consume_skb(skb);
+ return 0;
+}
+
+static __poll_t rpmsg_wwan_ctrl_tx_poll(struct wwan_port *port,
+ struct file *filp, poll_table *wait)
+{
+ struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+
+ return rpmsg_poll(rpwwan->ept, filp, wait);
+}
+
static const struct wwan_port_ops rpmsg_wwan_pops = {
.start = rpmsg_wwan_ctrl_start,
.stop = rpmsg_wwan_ctrl_stop,
.tx = rpmsg_wwan_ctrl_tx,
+ .tx_blocking = rpmsg_wwan_ctrl_tx_blocking,
+ .tx_poll = rpmsg_wwan_ctrl_tx_poll,
};
static struct device *rpmsg_wwan_find_parent(struct device *dev)
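The hunk above only shows the tail of the pre-existing non-blocking
rpmsg_wwan_ctrl_tx(). Based on the commit message's reference to
rpmsg_trysend(), it presumably looks roughly like the following sketch,
mirroring the new blocking variant (a reconstruction, not the actual
source):

	static int rpmsg_wwan_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
	{
		struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
		int ret;

		/* Non-blocking send: returns an error when the RPMSG TX queue
		 * has no space instead of sleeping. */
		ret = rpmsg_trysend(rpwwan->ept, skb->data, skb->len);
		if (ret)
			return ret;

		consume_skb(skb);
		return 0;
	}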
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 7e728042fc41..165afec1dbd1 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -500,7 +500,8 @@ static void wwan_port_op_stop(struct wwan_port *port)
mutex_unlock(&port->ops_lock);
}
-static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb)
+static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb,
+ bool nonblock)
{
int ret;
@@ -510,7 +511,10 @@ static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb)
goto out_unlock;
}
- ret = port->ops->tx(port, skb);
+ if (nonblock || !port->ops->tx_blocking)
+ ret = port->ops->tx(port, skb);
+ else
+ ret = port->ops->tx_blocking(port, skb);
out_unlock:
mutex_unlock(&port->ops_lock);
@@ -637,7 +641,7 @@ static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
return -EFAULT;
}
- ret = wwan_port_op_tx(port, skb);
+ ret = wwan_port_op_tx(port, skb, !!(filp->f_flags & O_NONBLOCK));
if (ret) {
kfree_skb(skb);
return ret;
@@ -653,12 +657,16 @@ static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
poll_wait(filp, &port->waitqueue, wait);
- if (!is_write_blocked(port))
+ mutex_lock(&port->ops_lock);
+ if (port->ops && port->ops->tx_poll)
+ mask |= port->ops->tx_poll(port, filp, wait);
+ else if (!is_write_blocked(port))
mask |= EPOLLOUT | EPOLLWRNORM;
if (!is_read_blocked(port))
mask |= EPOLLIN | EPOLLRDNORM;
if (!port->ops)
mask |= EPOLLHUP | EPOLLERR;
+ mutex_unlock(&port->ops_lock);
return mask;
}
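To illustrate the userspace-visible behavior after this change, here is a
minimal, hypothetical test program (the device path /dev/wwan0qmi0 comes
from the commit message; it assumes the underlying driver implements the
optional tx_blocking()/tx_poll() callbacks). A plain write() now sleeps in
tx_blocking() until the TX queue has space, while an O_NONBLOCK writer can
wait for EPOLLOUT via poll(), which the core now derives from tx_poll():

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[1000];
		memset(buf, 0, sizeof(buf));

		/* Blocking writer: write() sleeps instead of failing with
		 * "Resource temporarily unavailable" when the queue is full. */
		int fd = open("/dev/wwan0qmi0", O_WRONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, buf, sizeof(buf)) < 0)
			perror("write (blocking)");
		close(fd);

		/* Non-blocking writer: wait for writability via poll() first. */
		fd = open("/dev/wwan0qmi0", O_WRONLY | O_NONBLOCK);
		if (fd < 0) {
			perror("open O_NONBLOCK");
			return 1;
		}
		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
		if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLOUT)) {
			if (write(fd, buf, sizeof(buf)) < 0)
				perror("write (non-blocking)");
		}
		close(fd);
		return 0;
	}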