summary refs log tree commit diff
diff options
context:
space:
mode:
author	Jakub Kicinski <kuba@kernel.org>	2026-04-14 01:40:52 +0300
committer	Jakub Kicinski <kuba@kernel.org>	2026-04-14 01:40:53 +0300
commit	2258d1dffaf77bad190ac1cdd0a98db513c15232 (patch)
tree	0fe934139222859f16380d90ea93c77e23bdedab
parent	9ad24ba4085ebee5419cdd0dcc71050dda99af68 (diff)
parent	dd66b42854705e4e4ee7f14d260f86c578bed3e3 (diff)
download	linux-2258d1dffaf77bad190ac1cdd0a98db513c15232.tar.xz
Merge branch 'octeon_ep_vf-fix-napi_build_skb-null-dereference'
David Carlier says: ==================== octeon_ep_vf: fix napi_build_skb() NULL dereference napi_build_skb() can return NULL on allocation failure. In __octep_vf_oq_process_rx(), the result is used directly without a NULL check in both the single-buffer and multi-fragment paths, leading to a NULL pointer dereference. Patch 1 introduces a helper to deduplicate the ring index advance pattern, patch 2 adds the actual NULL checks. ==================== Link: https://patch.msgid.link/20260409184009.930359-1-devnexen@gmail.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--	drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c	47
1 file changed, 36 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
index b579d5b545c4..d98247408242 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
@@ -352,6 +352,11 @@ static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct,
return new_pkts;
}
+static inline u32 octep_vf_oq_next_idx(struct octep_vf_oq *oq, u32 idx)
+{
+ return (idx + 1 == oq->max_count) ? 0 : idx + 1;
+}
+
/**
* __octep_vf_oq_process_rx() - Process hardware Rx queue and push to stack.
*
@@ -409,30 +414,52 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
data_offset = OCTEP_VF_OQ_RESP_HW_SIZE;
rx_ol_flags = 0;
}
- rx_bytes += buff_info->len;
-
if (buff_info->len <= oq->max_single_buffer_size) {
skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
+ if (!skb) {
+ oq->stats->alloc_failures++;
+ desc_used++;
+ read_idx = octep_vf_oq_next_idx(oq, read_idx);
+ continue;
+ }
+ rx_bytes += buff_info->len;
skb_reserve(skb, data_offset);
skb_put(skb, buff_info->len);
- read_idx++;
desc_used++;
- if (read_idx == oq->max_count)
- read_idx = 0;
+ read_idx = octep_vf_oq_next_idx(oq, read_idx);
} else {
struct skb_shared_info *shinfo;
u16 data_len;
skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
+ if (!skb) {
+ oq->stats->alloc_failures++;
+ desc_used++;
+ read_idx = octep_vf_oq_next_idx(oq, read_idx);
+ data_len = buff_info->len - oq->max_single_buffer_size;
+ while (data_len) {
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ buff_info = (struct octep_vf_rx_buffer *)
+ &oq->buff_info[read_idx];
+ buff_info->page = NULL;
+ if (data_len < oq->buffer_size)
+ data_len = 0;
+ else
+ data_len -= oq->buffer_size;
+ desc_used++;
+ read_idx = octep_vf_oq_next_idx(oq, read_idx);
+ }
+ continue;
+ }
+ rx_bytes += buff_info->len;
skb_reserve(skb, data_offset);
/* Head fragment includes response header(s);
* subsequent fragments contains only data.
*/
skb_put(skb, oq->max_single_buffer_size);
- read_idx++;
desc_used++;
- if (read_idx == oq->max_count)
- read_idx = 0;
+ read_idx = octep_vf_oq_next_idx(oq, read_idx);
shinfo = skb_shinfo(skb);
data_len = buff_info->len - oq->max_single_buffer_size;
@@ -454,10 +481,8 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
buff_info->len,
buff_info->len);
buff_info->page = NULL;
- read_idx++;
desc_used++;
- if (read_idx == oq->max_count)
- read_idx = 0;
+ read_idx = octep_vf_oq_next_idx(oq, read_idx);
}
}