author     Matthew Wilcox <mawilcox@microsoft.com>	2017-11-28 20:05:54 +0300
committer  Matthew Wilcox <mawilcox@microsoft.com>	2018-02-07 00:41:27 +0300
commit     ffdc2d9e1afd20e9f9d205115661481e984542d6 (patch)
tree       aa224291034eed97a7686c6ae81b4846a800dbcb /net
parent     85bd0438a249e7a7a0622e0b0b6663595fadcc27 (diff)
download   linux-ffdc2d9e1afd20e9f9d205115661481e984542d6.tar.xz
cls_u32: Reinstate cyclic allocation
Commit e7614370d6f0 ("net_sched: use idr to allocate u32 filter handles")
converted htid allocation to use the IDR. That conversion changed the ID
allocation pattern: it used to be cyclic, but now it always returns the
lowest available ID. The IDR supports cyclic allocation, so just use
the right function.
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
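For context, a minimal sketch of the two IDR allocation modes involved, assuming in-kernel code; the idr instance and wrapper names below are illustrative, not part of the patch:

	#include <linux/idr.h>
	#include <linux/gfp.h>

	static DEFINE_IDR(example_idr);

	/* Lowest-available: a freed ID is handed out again immediately.
	 * This is the behaviour cls_u32 got after commit e7614370d6f0. */
	static int example_alloc_lowest(void *ptr)
	{
		return idr_alloc(&example_idr, ptr, 1, 0x7FF, GFP_KERNEL);
	}

	/* Cyclic: allocation resumes after the most recently returned ID
	 * and wraps around within the range, matching the pre-IDR
	 * behaviour this commit reinstates. */
	static int example_alloc_cyclic(void *ptr)
	{
		return idr_alloc_cyclic(&example_idr, ptr, 1, 0x7FF, GFP_KERNEL);
	}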
Diffstat (limited to 'net')
-rw-r--r--	net/sched/cls_u32.c	14
1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 5b256da985b1..d2805f24ddd3 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -316,19 +316,13 @@ static void *u32_get(struct tcf_proto *tp, u32 handle)
 	return u32_lookup_key(ht, handle);
 }
 
+/* Protected by rtnl lock */
 static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
 {
-	unsigned long idr_index;
-	int err;
-
-	/* This is only used inside rtnl lock it is safe to increment
-	 * without read _copy_ update semantics
-	 */
-	err = idr_alloc_ext(&tp_c->handle_idr, ptr, &idr_index,
-			    1, 0x7FF, GFP_KERNEL);
-	if (err)
+	int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
+	if (id < 0)
 		return 0;
-	return (u32)(idr_index | 0x800) << 20;
+	return (id | 0x800U) << 20;
 }
 
 static struct hlist_head *tc_u_common_hash;
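A rough userspace illustration, not part of the patch, of how the value returned by gen_new_htid() packs into a u32 filter handle; the TC_U32_* masks mirror those in include/uapi/linux/pkt_cls.h, and main() is purely for demonstration:

	#include <stdint.h>
	#include <stdio.h>

	#define TC_U32_HTID(h)     ((h) & 0xFFF00000)
	#define TC_U32_USERHTID(h) (TC_U32_HTID(h) >> 20)

	int main(void)
	{
		uint32_t id = 1;                      /* e.g. the first ID the allocator returns */
		uint32_t htid = (id | 0x800U) << 20;  /* same packing as gen_new_htid() */

		/* The htid occupies the top 12 bits of the handle, with the
		 * high bit of that field always set by the 0x800 OR. */
		printf("htid field = 0x%03x (handle 0x%08x)\n",
		       TC_U32_USERHTID(htid), htid);
		return 0;
	}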