author		Yunsheng Lin <linyunsheng@huawei.com>	2019-05-06 05:48:48 +0300
committer	David S. Miller <davem@davemloft.net>	2019-05-07 20:37:13 +0300
commit		aa9d22dd456eb255db2a4a5b214ec2e243dda4c8
tree		ebd2eef59e781e4600a8065ad4e1d99415c386e4
parent		757cd1e4a4d81181fcd7130c4315d169ad9f5b81
net: hns3: fix error handling for desc filling
When desc filling fails in hns3_nic_net_xmit, hns3_clear_desc is called to unmap the dma mappings that were already set up. But currently ring->next_to_use points to the desc on which the desc filling or dma mapping returned an error, which means the desc that ring->next_to_use points to has not been dma mapped; the descs that need unmapping are the ones before ring->next_to_use.

Fix this by calling ring_ptr_move_bw(next_to_use) before doing the unmapping operation, and set desc_cb->dma to zero to avoid freeing it again when the driver is unloaded.

Also, when filling the skb head or a frag fails, both cases need to unmap all the way back to next_to_use_head, so the two error-handling labels are merged into one.

Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC")
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
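For illustration, below is a minimal user-space C sketch of the rollback order this patch establishes. It is not the driver code itself: every name in it (demo_ring, demo_desc_cb, demo_clear_desc, demo_unmap) is a hypothetical stand-in for the hns3 ring bookkeeping, and the DMA unmap is simulated with a printf().

/*
 * Illustrative model only -- not hns3 driver code.
 * demo_* names are hypothetical; "unmapping" is simulated.
 */
#include <stdio.h>

#define RING_SIZE 8

struct demo_desc_cb {
	unsigned long dma;	/* 0 means nothing is mapped in this slot */
	int length;
};

struct demo_ring {
	struct demo_desc_cb cb[RING_SIZE];
	int next_to_use;
};

/* stand-in for the real DMA unmap call */
static void demo_unmap(struct demo_desc_cb *cb)
{
	printf("unmap dma=0x%lx len=%d\n", cb->dma, cb->length);
}

/* models the fixed hns3_clear_desc() flow */
static void demo_clear_desc(struct demo_ring *ring, int next_to_use_orig)
{
	while (ring->next_to_use != next_to_use_orig) {
		/* rollback one: on failure next_to_use points at the slot
		 * that was never mapped, so step back before unmapping
		 */
		ring->next_to_use =
			(ring->next_to_use - 1 + RING_SIZE) % RING_SIZE;

		if (ring->cb[ring->next_to_use].dma)
			demo_unmap(&ring->cb[ring->next_to_use]);

		ring->cb[ring->next_to_use].length = 0;
		/* clear the handle so a later teardown cannot free it twice */
		ring->cb[ring->next_to_use].dma = 0;
	}
}

int main(void)
{
	struct demo_ring ring = { .next_to_use = 0 };
	int next_to_use_head = ring.next_to_use;

	/* two descriptors map fine ... */
	for (int i = 0; i < 2; i++) {
		ring.cb[ring.next_to_use].dma = 0x1000 + 0x100 * i;
		ring.cb[ring.next_to_use].length = 64;
		ring.next_to_use = (ring.next_to_use + 1) % RING_SIZE;
	}

	/* ... then filling the third fails before it is mapped, so
	 * next_to_use is left pointing at the unmapped slot
	 */
	demo_clear_desc(&ring, next_to_use_head);
	return 0;
}

Stepping next_to_use back before releasing a slot mirrors the change in hns3_clear_desc(): the slot next_to_use points at when the error occurs was never mapped, and zeroing the stored handle keeps a later teardown from releasing the same mapping twice.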
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--	drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 18
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 7224b3822733..21eac68ed91c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1224,6 +1224,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
 		if (ring->next_to_use == next_to_use_orig)
 			break;
 
+		/* rollback one */
+		ring_ptr_move_bw(ring, next_to_use);
+
 		/* unmap the descriptor dma address */
 		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
 			dma_unmap_single(dev,
@@ -1237,9 +1240,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
 				       DMA_TO_DEVICE);
 
 		ring->desc_cb[ring->next_to_use].length = 0;
-
-		/* rollback one */
-		ring_ptr_move_bw(ring, next_to_use);
+		ring->desc_cb[ring->next_to_use].dma = 0;
 	}
 }
 
@@ -1252,7 +1253,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	struct netdev_queue *dev_queue;
 	struct skb_frag_struct *frag;
 	int next_to_use_head;
-	int next_to_use_frag;
 	int buf_num;
 	int seg_num;
 	int size;
@@ -1291,9 +1291,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
 			     DESC_TYPE_SKB);
 	if (unlikely(ret))
-		goto head_fill_err;
+		goto fill_err;
 
-	next_to_use_frag = ring->next_to_use;
 	/* Fill the fragments */
 	for (i = 1; i < seg_num; i++) {
 		frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1304,7 +1303,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 				    DESC_TYPE_PAGE);
 
 		if (unlikely(ret))
-			goto frag_fill_err;
+			goto fill_err;
 	}
 
 	/* Complete translate all packets */
@@ -1317,10 +1316,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	return NETDEV_TX_OK;
 
-frag_fill_err:
-	hns3_clear_desc(ring, next_to_use_frag);
-
-head_fill_err:
+fill_err:
 	hns3_clear_desc(ring, next_to_use_head);
 
 out_err_tx_ok: