author    Pablo Neira Ayuso <pablo@netfilter.org>  2019-10-31 17:51:21 +0300
committer Pablo Neira Ayuso <pablo@netfilter.org>  2019-11-13 12:41:34 +0300
commit    f41f72d09ee1e9a980a1675be31120f547f2a648 (patch)
tree      17f8e0e4e81b4657b5082a063f3abad9686b17cb /net/netfilter
parent    25da5eb32cd51383f6dca7aad252376f1979c075 (diff)
netfilter: nft_payload: simplify vlan header handling
If the offset is within the ethernet + vlan header size boundary, then rebuild the ethernet + vlan header and use it to copy the bytes to the register. Otherwise, subtract the vlan header size from the offset and fall back to skb_copy_bits().

There is one corner case though: if the offset plus the length of the payload instruction goes over the ethernet + vlan header boundary, then fetch as many bytes as possible from the rebuilt ethernet + vlan header and fall back to copying the remaining bytes through skb_copy_bits().

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Acked-by: Florian Westphal <fw@strlen.de>
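The boundary arithmetic of the corner case is easier to follow in isolation. Below is a minimal user-space sketch of the split described above; the ETH_HLEN/VLAN_HLEN/VLAN_ETH_HLEN constants mirror <linux/if_ether.h> and <linux/if_vlan.h>, while the split_copy() helper and the example offsets are illustrative assumptions, not part of the patch (skb_copy_bits() is only referenced in comments).

#include <stdio.h>

#define ETH_HLEN        14                      /* untagged ethernet header */
#define VLAN_HLEN       4                       /* 802.1Q tag */
#define VLAN_ETH_HLEN   (ETH_HLEN + VLAN_HLEN)  /* ethernet + vlan header */

/*
 * For a request of @len bytes starting at @offset (relative to the mac
 * header of a vlan-tagged packet whose tag is stored out of band), report
 * how many bytes come from the rebuilt ethernet + vlan header and how many
 * must be fetched from the skb data (skb_copy_bits() in the kernel).
 */
static void split_copy(unsigned int offset, unsigned int len)
{
        unsigned int from_hdr = 0, skb_off = offset;

        if (offset < VLAN_ETH_HLEN) {
                from_hdr = len;
                if (offset + len > VLAN_ETH_HLEN)
                        from_hdr -= offset + len - VLAN_ETH_HLEN;
                /* skb data resumes right after the untagged header */
                skb_off = ETH_HLEN;
        } else {
                /* the 4-byte tag is not in the skb data, shift the offset */
                skb_off = offset - VLAN_HLEN;
        }

        printf("offset=%2u len=%2u -> %u byte(s) from rebuilt header, %u byte(s) from skb at offset %u\n",
               offset, len, from_hdr, len - from_hdr, skb_off);
}

int main(void)
{
        split_copy(0, 14);      /* entirely inside the rebuilt header */
        split_copy(16, 8);      /* corner case: 2 bytes from header, 6 from skb */
        split_copy(20, 8);      /* past the header: everything from skb */
        return 0;
}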
Diffstat (limited to 'net/netfilter')
-rw-r--r--  net/netfilter/nft_payload.c | 28
1 file changed, 9 insertions(+), 19 deletions(-)
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 5cb2d8908d2a..247799801165 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -28,17 +28,22 @@ static bool
 nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
 {
        int mac_off = skb_mac_header(skb) - skb->data;
-       u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
+       u8 *vlanh, *dst_u8 = (u8 *) d;
        struct vlan_ethhdr veth;
 
        vlanh = (u8 *) &veth;
-       if (offset < ETH_HLEN) {
-               u8 ethlen = min_t(u8, len, ETH_HLEN - offset);
+       if (offset < VLAN_ETH_HLEN) {
+               u8 ethlen = len;
 
                if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
                        return false;
 
                veth.h_vlan_proto = skb->vlan_proto;
+               veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
+               veth.h_vlan_encapsulated_proto = skb->protocol;
+
+               if (offset + len > VLAN_ETH_HLEN)
+                       ethlen -= offset + len - VLAN_ETH_HLEN;
 
                memcpy(dst_u8, vlanh + offset, ethlen);
 
@@ -48,25 +53,10 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
 
                dst_u8 += ethlen;
                offset = ETH_HLEN;
-       } else if (offset >= VLAN_ETH_HLEN) {
+       } else {
                offset -= VLAN_HLEN;
-               goto skip;
        }
 
-       veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
-       veth.h_vlan_encapsulated_proto = skb->protocol;
-
-       vlanh += offset;
-
-       vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
-       memcpy(dst_u8, vlanh, vlan_len);
-
-       len -= vlan_len;
-       if (!len)
-               return true;
-
-       dst_u8 += vlan_len;
-skip:
        return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
 }
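For reference, this is roughly how nft_payload_copy_vlan() reads once the two hunks above are applied. It is reassembled from the diff for readability; the few unchanged lines that fall between the hunks (the len -= ethlen early-return path) are filled in from the surrounding file and should be checked against the tree rather than taken as part of the patch text.

static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
        int mac_off = skb_mac_header(skb) - skb->data;
        u8 *vlanh, *dst_u8 = (u8 *) d;
        struct vlan_ethhdr veth;

        vlanh = (u8 *) &veth;
        if (offset < VLAN_ETH_HLEN) {
                u8 ethlen = len;

                /* rebuild the ethernet + vlan header from the out-of-band tag */
                if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
                        return false;

                veth.h_vlan_proto = skb->vlan_proto;
                veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
                veth.h_vlan_encapsulated_proto = skb->protocol;

                /* corner case: request crosses the ethernet + vlan boundary */
                if (offset + len > VLAN_ETH_HLEN)
                        ethlen -= offset + len - VLAN_ETH_HLEN;

                memcpy(dst_u8, vlanh + offset, ethlen);

                len -= ethlen;
                if (!len)
                        return true;

                dst_u8 += ethlen;
                offset = ETH_HLEN;
        } else {
                /* past the rebuilt header: the tag is not in the skb data */
                offset -= VLAN_HLEN;
        }

        return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}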