| author | Pablo Neira Ayuso <pablo@netfilter.org> | 2014-02-06 19:15:39 +0400 |
|---|---|---|
| committer | Pablo Neira Ayuso <pablo@netfilter.org> | 2014-02-07 17:22:06 +0400 |
| commit | 2fb91ddbf8e1dcb207e1e2085473aeaeff975102 (patch) | |
| tree | d546b5942af2d99c7e0f6482f8994f737ed6cb37 /net | |
| parent | bd7fc645dabab423ef362186db2917f3919321d3 (diff) | |
| download | linux-2fb91ddbf8e1dcb207e1e2085473aeaeff975102.tar.xz | |
netfilter: nft_rbtree: fix data handling of end interval elements
This patch fixes several things related to the handling of
end interval elements:
* Chain use underflow with intervals and map: If you add a rule
using intervals+map that introduces a loop, the error path of the
rbtree set decrements the chain refcount for each side of the
interval, leading to a chain use counter underflow.
* Don't copy the data part of the end interval element, since this
area is uninitialized and confuses the loop detection code.
* Don't allocate room for the data part of end interval elements
since this is unused.
So, after this patch, the idea is that end interval elements don't
have a data part.
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Acked-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/netfilter/nft_rbtree.c | 16 |
1 file changed, 11 insertions, 5 deletions
```diff
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index ca0c1b231bfe..e21d69d13506 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -69,8 +69,10 @@ static void nft_rbtree_elem_destroy(const struct nft_set *set,
 				    struct nft_rbtree_elem *rbe)
 {
 	nft_data_uninit(&rbe->key, NFT_DATA_VALUE);
-	if (set->flags & NFT_SET_MAP)
+	if (set->flags & NFT_SET_MAP &&
+	    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 		nft_data_uninit(rbe->data, set->dtype);
+
 	kfree(rbe);
 }
 
@@ -108,7 +110,8 @@ static int nft_rbtree_insert(const struct nft_set *set,
 	int err;
 
 	size = sizeof(*rbe);
-	if (set->flags & NFT_SET_MAP)
+	if (set->flags & NFT_SET_MAP &&
+	    !(elem->flags & NFT_SET_ELEM_INTERVAL_END))
 		size += sizeof(rbe->data[0]);
 
 	rbe = kzalloc(size, GFP_KERNEL);
@@ -117,7 +120,8 @@ static int nft_rbtree_insert(const struct nft_set *set,
 
 	rbe->flags = elem->flags;
 	nft_data_copy(&rbe->key, &elem->key);
-	if (set->flags & NFT_SET_MAP)
+	if (set->flags & NFT_SET_MAP &&
+	    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 		nft_data_copy(rbe->data, &elem->data);
 
 	err = __nft_rbtree_insert(set, rbe);
@@ -153,7 +157,8 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
 			parent = parent->rb_right;
 		else {
 			elem->cookie = rbe;
-			if (set->flags & NFT_SET_MAP)
+			if (set->flags & NFT_SET_MAP &&
+			    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 				nft_data_copy(&elem->data, rbe->data);
 			elem->flags = rbe->flags;
 			return 0;
@@ -177,7 +182,8 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
 
 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
 		nft_data_copy(&elem.key, &rbe->key);
-		if (set->flags & NFT_SET_MAP)
+		if (set->flags & NFT_SET_MAP &&
+		    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 			nft_data_copy(&elem.data, rbe->data);
 		elem.flags = rbe->flags;
 
```
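Every hunk above applies the same guard: the data part of a map element is only touched when the element is not an end-interval marker. Below is a minimal, hypothetical userspace sketch of that allocation/copy pattern; `struct map_elem`, `elem_new()`, `elem_destroy()` and the `ELEM_INTERVAL_END` flag are simplified stand-ins, not the kernel's nft_set API.

```c
/* Hypothetical userspace sketch of the pattern applied by the patch:
 * room for the data part is only allocated, and the data only copied,
 * when the element is not an end-interval marker.  All names here are
 * simplified stand-ins, not kernel code. */
#include <stdlib.h>
#include <string.h>

#define ELEM_INTERVAL_END	0x1	/* stand-in for NFT_SET_ELEM_INTERVAL_END */

struct map_elem {
	unsigned int	flags;
	unsigned int	key;
	unsigned char	data[];		/* present only for non-end elements */
};

static struct map_elem *elem_new(unsigned int flags, unsigned int key,
				 const void *data, size_t dlen)
{
	size_t size = sizeof(struct map_elem);
	struct map_elem *e;

	/* End-interval elements have no data part, so no room is reserved. */
	if (!(flags & ELEM_INTERVAL_END))
		size += dlen;

	e = calloc(1, size);
	if (e == NULL)
		return NULL;

	e->flags = flags;
	e->key = key;

	/* ...and nothing is copied into the (nonexistent) data area. */
	if (!(flags & ELEM_INTERVAL_END))
		memcpy(e->data, data, dlen);
	return e;
}

static void elem_destroy(struct map_elem *e)
{
	/* Likewise, there is no data part to release for end elements. */
	free(e);
}

int main(void)
{
	unsigned int verdict = 42;	/* pretend data part (e.g. a verdict) */
	struct map_elem *start, *end;

	start = elem_new(0, 10, &verdict, sizeof(verdict));
	end   = elem_new(ELEM_INTERVAL_END, 20, NULL, 0);

	elem_destroy(start);
	elem_destroy(end);
	return 0;
}
```

The test `!(flags & ELEM_INTERVAL_END)` mirrors the checks the patch adds in nft_rbtree_elem_destroy(), nft_rbtree_insert(), nft_rbtree_get() and nft_rbtree_walk().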
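The chain-use underflow from the first bullet of the commit message can also be shown with a small, entirely hypothetical model: an interval is stored as two elements, only the non-end element binds the destination chain, so a cleanup path that unconditionally releases both halves drives the use counter below zero. Nothing below is kernel code; `struct chain`, `bind()` and the two unbind variants are made up for illustration.

```c
/* Hypothetical model of the refcount bug: binding bumps chain->use once
 * per interval (only the non-end element carries data), but the buggy
 * error path decrements it once per element, i.e. twice per interval. */
#include <stdio.h>
#include <stdbool.h>

struct chain { int use; };

struct elem {
	bool		interval_end;	/* end half of an interval, no data part */
	struct chain	*chain;		/* bound chain (non-end elements only)   */
};

static void bind(struct elem *e, struct chain *c)
{
	if (!e->interval_end) {
		e->chain = c;
		c->use++;
	}
}

/* Buggy error path: releases a reference for both halves of the interval. */
static void buggy_unbind(struct elem *e, struct chain *c)
{
	(void)e;	/* the element's flags are ignored: that is the bug */
	c->use--;
}

/* Fixed error path: mirrors bind() and skips end-interval elements. */
static void fixed_unbind(struct elem *e, struct chain *c)
{
	if (!e->interval_end)
		c->use--;
}

int main(void)
{
	struct chain c = { .use = 0 };
	struct elem interval[2] = {
		{ .interval_end = false },
		{ .interval_end = true  },
	};

	for (int i = 0; i < 2; i++)
		bind(&interval[i], &c);
	for (int i = 0; i < 2; i++)
		buggy_unbind(&interval[i], &c);
	printf("buggy error path: chain use = %d (underflow)\n", c.use);

	c.use = 0;
	for (int i = 0; i < 2; i++)
		bind(&interval[i], &c);
	for (int i = 0; i < 2; i++)
		fixed_unbind(&interval[i], &c);
	printf("fixed error path: chain use = %d\n", c.use);
	return 0;
}
```

With the buggy path the counter ends at -1 even though it never exceeded 1, which is the underflow the patch avoids by giving end-interval elements no data part to release.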