author     Julian Wiedmann <jwi@linux.ibm.com>      2018-07-19 13:43:56 +0300
committer  David S. Miller <davem@davemloft.net>    2018-07-21 20:12:30 +0300
commit     ba86ceee9d1b5aa71fe3db75b2ec5452c9a48307
tree       d4af4b1cab727029b75e594f83c5cf99ecefed75
parent     d2a274b25be7218f8400037868a756640e8a4b0d
s390/qeth: merge linearize-check into HW header construction
When checking whether an skb needs to be linearized to fit into an IO
buffer, it's desirable to consider the skb's final size and layout
(i.e. after the HW header was added). But a subsequent linearization can
then cause the re-positioned HW header to violate its alignment
restrictions.

Dealing with this situation in two different code paths is quite tricky.

This patch integrates a) the linearize-check and b) the HW header
construction into one 3-step sequence:
1. Evaluate how the HW header needs to be added (to identify whether it
   takes up an additional buffer element), then
2. check whether the required buffer elements exceed the device's limit.
   Linearize when necessary and re-evaluate the HW header placement.
3. Add the HW header in the best possible way:
   a) push, without taking up an additional buffer element,
   b) push, but consume another buffer element, or
   c) allocate a header object from the cache.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
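As a rough illustration of the 3-step sequence above, here is a
stand-alone toy model of the decision. Everything in it (struct toy_skb,
HW_HDR_LEN, MAX_ELEMENTS and the helper functions) is an illustrative
assumption, not the driver's actual data structures; in the patch itself
this work is done by qeth_add_hw_header(), which the L2 xmit path now
calls.

#include <stdbool.h>
#include <stdio.h>

#define HW_HDR_LEN   32   /* assumed size of the qeth HW header      */
#define MAX_ELEMENTS 16   /* assumed buffer-element limit per packet */

struct toy_skb {
	unsigned int headroom;   /* free bytes in front of the data    */
	unsigned int frags;      /* nr of page fragments (0 = linear)  */
	bool hdr_contiguous;     /* a pushed hdr would share a buffer
				  * element with the start of the data */
};

/* Step 1: how many extra buffer elements does the header placement cost?
 * 0 models case a) (push, same element); 1 stands in for both case b)
 * (push, own element) and case c) (allocate from the header cache).
 */
static unsigned int hdr_element_cost(const struct toy_skb *skb)
{
	if (skb->headroom >= HW_HDR_LEN && skb->hdr_contiguous)
		return 0;
	return 1;
}

static unsigned int payload_elements(const struct toy_skb *skb)
{
	return 1 + skb->frags;
}

int main(void)
{
	struct toy_skb skb = { .headroom = 64, .frags = 20,
			       .hdr_contiguous = true };
	unsigned int elements;

	/* Step 2: check the device limit; linearize and re-evaluate. */
	elements = payload_elements(&skb) + hdr_element_cost(&skb);
	if (elements > MAX_ELEMENTS) {
		skb.frags = 0;	/* stand-in for skb_linearize() */
		elements = payload_elements(&skb) + hdr_element_cost(&skb);
	}

	/* Step 3: the caller only needs the final element count and
	 * whether the header still has to be added separately.
	 */
	printf("send with %u element(s), header cost %u\n",
	       elements, hdr_element_cost(&skb));
	return 0;
}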
Diffstat (limited to 'drivers/s390/net/qeth_l2_main.c')
-rw-r--r--   drivers/s390/net/qeth_l2_main.c   29
1 file changed, 2 insertions(+), 27 deletions(-)
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index a785c5ff73cd..905f3bb3a87c 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -672,39 +672,21 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
 			    int ipv)
 {
 	int push_len = sizeof(struct qeth_hdr);
-	unsigned int hdr_elements = 0;
 	struct qeth_hdr *hdr = NULL;
 	unsigned int hd_len = 0;
 	unsigned int elements;
 	bool is_sg;
 	int rc;
 
-	/* fix hardware limitation: as long as we do not have sbal
-	 * chaining we can not send long frag lists
-	 */
-	if (!qeth_get_elements_no(card, skb, 0, 0)) {
-		rc = skb_linearize(skb);
-
-		if (card->options.performance_stats) {
-			if (rc)
-				card->perf_stats.tx_linfail++;
-			else
-				card->perf_stats.tx_lin++;
-		}
-		if (rc)
-			return rc;
-	}
-
 	rc = skb_cow_head(skb, push_len);
 	if (rc)
 		return rc;
-	push_len = qeth_push_hdr(skb, &hdr, push_len);
+	push_len = qeth_add_hw_header(card, skb, &hdr, push_len, &elements);
 	if (push_len < 0)
 		return push_len;
 	if (!push_len) {
 		/* hdr was allocated from cache */
 		hd_len = sizeof(*hdr);
-		hdr_elements = 1;
 	}
 	qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len);
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -713,18 +695,11 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
 		card->perf_stats.tx_csum++;
 	}
 
-	elements = qeth_get_elements_no(card, skb, hdr_elements, 0);
-	if (!elements) {
-		rc = -E2BIG;
-		goto out;
-	}
-	elements += hdr_elements;
-
 	is_sg = skb_is_nonlinear(skb);
 	/* TODO: remove the skb_orphan() once TX completion is fast enough */
 	skb_orphan(skb);
 	rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements);
-out:
+
 	if (!rc) {
 		if (card->options.performance_stats) {
 			card->perf_stats.buf_elements_sent += elements;