-rw-r--r--  include/linux/rhashtable.h  | 408
-rw-r--r--  lib/rhashtable.c            | 396
-rw-r--r--  lib/test_rhashtable.c       |  33
-rw-r--r--  net/netfilter/nft_hash.c    |  70
-rw-r--r--  net/netlink/af_netlink.c    |  88
-rw-r--r--  net/tipc/socket.c           |  32
6 files changed, 538 insertions(+), 489 deletions(-)
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 99425f2be708..c3034de2c235 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,14 +1,13 @@
/*
* Resizable, Scalable, Concurrent Hash Table
*
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
* Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
- * Based on the following paper by Josh Triplett, Paul E. McKenney
- * and Jonathan Walpole:
- * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
- *
* Code partially derived from nft_hash
+ * Rewritten with rehash code from br_multicast plus single list
+ * pointer as suggested by Josh Triplett
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -22,6 +21,7 @@
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/rcupdate.h>
/*
* The end of the chain is marked with a special nulls marks which has
@@ -42,6 +42,9 @@
#define RHT_HASH_BITS 27
#define RHT_BASE_SHIFT RHT_HASH_BITS
+/* Base bits plus 1 bit for nulls marker */
+#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
+
struct rhash_head {
struct rhash_head __rcu *next;
};
@@ -72,8 +75,20 @@ struct bucket_table {
struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
+/**
+ * struct rhashtable_compare_arg - Key for the function rhashtable_compare
+ * @ht: Hash table
+ * @key: Key to compare against
+ */
+struct rhashtable_compare_arg {
+ struct rhashtable *ht;
+ const void *key;
+};
+
typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);
+typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
+ const void *obj);
struct rhashtable;
@@ -89,6 +104,7 @@ struct rhashtable;
* @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
* @hashfn: Function to hash key
* @obj_hashfn: Function to hash object
+ * @obj_cmpfn: Function to compare key with object
*/
struct rhashtable_params {
size_t nelem_hint;
@@ -101,6 +117,7 @@ struct rhashtable_params {
size_t locks_mul;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
+ rht_obj_cmpfn_t obj_cmpfn;
};
/**
@@ -165,6 +182,83 @@ static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
return ((unsigned long) ptr) >> 1;
}
+static inline void *rht_obj(const struct rhashtable *ht,
+ const struct rhash_head *he)
+{
+ return (char *)he - ht->p.head_offset;
+}
+
+static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
+ unsigned int hash)
+{
+ return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
+}
+
+static inline unsigned int rht_key_hashfn(
+ struct rhashtable *ht, const struct bucket_table *tbl,
+ const void *key, const struct rhashtable_params params)
+{
+ return rht_bucket_index(tbl, params.hashfn(key, params.key_len ?:
+ ht->p.key_len,
+ tbl->hash_rnd));
+}
+
+static inline unsigned int rht_head_hashfn(
+ struct rhashtable *ht, const struct bucket_table *tbl,
+ const struct rhash_head *he, const struct rhashtable_params params)
+{
+ const char *ptr = rht_obj(ht, he);
+
+ return likely(params.obj_hashfn) ?
+ rht_bucket_index(tbl, params.obj_hashfn(ptr, tbl->hash_rnd)) :
+ rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
+}
+
+/**
+ * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_grow_above_75(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ /* Expand table when exceeding 75% load */
+ return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
+ (!ht->p.max_size || tbl->size < ht->p.max_size);
+}
+
+/**
+ * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_shrink_below_30(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ /* Shrink table beneath 30% load */
+ return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
+ tbl->size > ht->p.min_size;
+}
+
+/* The bucket lock is selected based on the hash and protects mutations
+ * on a group of hash buckets.
+ *
+ * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
+ * a single lock always covers both buckets which may both contain
+ * entries which link to the same bucket of the old table during resizing.
+ * This simplifies the locking as locking the bucket in both
+ * tables during resize always guarantees protection.
+ *
+ * IMPORTANT: When holding the bucket lock of both the old and new table
+ * during expansions and shrinking, the old bucket lock must always be
+ * acquired first.
+ */
+static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
+ unsigned int hash)
+{
+ return &tbl->locks[hash & tbl->locks_mask];
+}
+
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
@@ -181,24 +275,16 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
}
#endif /* CONFIG_PROVE_LOCKING */
-int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
+int rhashtable_init(struct rhashtable *ht,
+ const struct rhashtable_params *params);
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
+int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj,
+ struct bucket_table *old_tbl);
int rhashtable_expand(struct rhashtable *ht);
int rhashtable_shrink(struct rhashtable *ht);
-void *rhashtable_lookup(struct rhashtable *ht, const void *key);
-void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
- bool (*compare)(void *, void *), void *arg);
-
-bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
-bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
- struct rhash_head *obj,
- bool (*compare)(void *, void *),
- void *arg);
-
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
@@ -355,4 +441,292 @@ void rhashtable_destroy(struct rhashtable *ht);
rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
tbl, hash, member)
+static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ struct rhashtable *ht = arg->ht;
+ const char *ptr = obj;
+
+ return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, inlined version
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * Returns the first matching entry, or NULL if no entry was found.
+ */
+static inline void *rhashtable_lookup_fast(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhashtable_compare_arg arg = {
+ .ht = ht,
+ .key = key,
+ };
+ const struct bucket_table *tbl;
+ struct rhash_head *he;
+ unsigned hash;
+
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+restart:
+ hash = rht_key_hashfn(ht, tbl, key, params);
+ rht_for_each_rcu(he, tbl, hash) {
+ if (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, he)) :
+ rhashtable_compare(&arg, rht_obj(ht, he)))
+ continue;
+ rcu_read_unlock();
+ return rht_obj(ht, he);
+ }
+
+ /* Ensure we see any new tables. */
+ smp_rmb();
+
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ if (unlikely(tbl))
+ goto restart;
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline int __rhashtable_insert_fast(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ struct rhashtable_compare_arg arg = {
+ .ht = ht,
+ .key = key,
+ };
+ int err = -EEXIST;
+ struct bucket_table *tbl, *new_tbl;
+ struct rhash_head *head;
+ spinlock_t *lock;
+ unsigned hash;
+
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+ hash = rht_head_hashfn(ht, tbl, obj, params);
+ lock = rht_bucket_lock(tbl, hash);
+
+ spin_lock_bh(lock);
+
+ /* Because we have already taken the bucket lock in tbl,
+ * if we find that future_tbl is not yet visible then
+ * that guarantees all other insertions of the same entry
+ * will also grab the bucket lock in tbl because until
+ * the rehash completes ht->tbl won't be changed.
+ */
+ new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ if (unlikely(new_tbl)) {
+ err = rhashtable_insert_slow(ht, key, obj, new_tbl);
+ goto out;
+ }
+
+ if (!key)
+ goto skip_lookup;
+
+ rht_for_each(head, tbl, hash) {
+ if (unlikely(!(params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+ rhashtable_compare(&arg, rht_obj(ht, head)))))
+ goto out;
+ }
+
+skip_lookup:
+ err = 0;
+
+ head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+
+ RCU_INIT_POINTER(obj->next, head);
+
+ rcu_assign_pointer(tbl->buckets[hash], obj);
+
+ atomic_inc(&ht->nelems);
+ if (rht_grow_above_75(ht, tbl))
+ schedule_work(&ht->run_work);
+
+out:
+ spin_unlock_bh(lock);
+ rcu_read_unlock();
+
+ return err;
+}
+
+/**
+ * rhashtable_insert_fast - insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per-bucket spinlock to protect against concurrent mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the 75% watermark (see rht_grow_above_75()).
+ */
+static inline int rhashtable_insert_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ return __rhashtable_insert_fast(ht, NULL, obj, params);
+}
+
+/**
+ * rhashtable_lookup_insert_fast - lookup and insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * This lookup function may only be used for fixed key hash tables (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the 75% watermark (see rht_grow_above_75()).
+ */
+static inline int rhashtable_lookup_insert_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ const char *key = rht_obj(ht, obj);
+
+ BUG_ON(ht->p.obj_hashfn);
+
+ return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
+ params);
+}
+
+/**
+ * rhashtable_lookup_insert_key - search and insert object to hash table
+ * with explicit key
+ * @ht: hash table
+ * @key: key
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * Lookups may occur in parallel with hashtable mutations and resizing.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the 75% watermark (see rht_grow_above_75()).
+ *
+ * Returns zero on success.
+ */
+static inline int rhashtable_lookup_insert_key(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ BUG_ON(!ht->p.obj_hashfn || !key);
+
+ return __rhashtable_insert_fast(ht, key, obj, params);
+}
+
+static inline int __rhashtable_remove_fast(
+ struct rhashtable *ht, struct bucket_table *tbl,
+ struct rhash_head *obj, const struct rhashtable_params params)
+{
+ struct rhash_head __rcu **pprev;
+ struct rhash_head *he;
+ spinlock_t *lock;
+ unsigned hash;
+ int err = -ENOENT;
+
+ hash = rht_head_hashfn(ht, tbl, obj, params);
+ lock = rht_bucket_lock(tbl, hash);
+
+ spin_lock_bh(lock);
+
+ pprev = &tbl->buckets[hash];
+ rht_for_each(he, tbl, hash) {
+ if (he != obj) {
+ pprev = &he->next;
+ continue;
+ }
+
+ rcu_assign_pointer(*pprev, obj->next);
+ err = 0;
+ break;
+ }
+
+ spin_unlock_bh(lock);
+
+ return err;
+}
+
+/**
+ * rhashtable_remove_fast - remove object from hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is singly linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically trigger a deferred table shrink if the number of
+ * elements drops below the 30% watermark (see rht_shrink_below_30()).
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhashtable_remove_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ struct bucket_table *tbl;
+ int err;
+
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ /* Because we have already taken (and released) the bucket
+ * lock in old_tbl, if we find that future_tbl is not yet
+ * visible then that guarantees the entry to still be in
+ * the old tbl if it exists.
+ */
+ while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
+ (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
+ ;
+
+ if (err)
+ goto out;
+
+ atomic_dec(&ht->nelems);
+ if (rht_shrink_below_30(ht, tbl))
+ schedule_work(&ht->run_work);
+
+out:
+ rcu_read_unlock();
+
+ return err;
+}
+
#endif /* _LINUX_RHASHTABLE_H */
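
For illustration, a minimal sketch of how a fixed-key user wires up the inlined API declared above (all demo_* names are hypothetical and not part of the patch):

#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <linux/slab.h>

struct demo_obj {
	u32 key;			/* lookup key */
	struct rhash_head node;		/* linkage owned by the hash table */
};

/* Passing the parameters as a compile-time constant lets the compiler
 * inline the hash and compare paths of the _fast helpers.
 */
static const struct rhashtable_params demo_params = {
	.head_offset = offsetof(struct demo_obj, node),
	.key_offset  = offsetof(struct demo_obj, key),
	.key_len     = sizeof(u32),
	.hashfn      = jhash,
};

static int demo_use(struct rhashtable *ht, u32 key)
{
	struct demo_obj *obj;
	int err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;
	obj->key = key;

	/* 0 on success, -EEXIST if an object with the same key exists. */
	err = rhashtable_lookup_insert_fast(ht, &obj->node, demo_params);
	if (err) {
		kfree(obj);
		return err;
	}

	obj = rhashtable_lookup_fast(ht, &key, demo_params);
	if (obj)
		/* 0 on success, -ENOENT if the object is not in the table. */
		err = rhashtable_remove_fast(ht, &obj->node, demo_params);

	return err;
}

Because the parameters are constant at every call site, the hash and compare logic collapses into the callers instead of going through the per-table function pointers used by the old interface.
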
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index e75c48d9d82f..83cfedd6612a 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -1,13 +1,13 @@
/*
* Resizable, Scalable, Concurrent Hash Table
*
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
* Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
- * Based on the following paper:
- * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
- *
* Code partially derived from nft_hash
+ * Rewritten with rehash code from br_multicast plus single list
+ * pointer as suggested by Josh Triplett
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -30,53 +30,11 @@
#define HASH_MIN_SIZE 4U
#define BUCKET_LOCKS_PER_CPU 128UL
-/* Base bits plus 1 bit for nulls marker */
-#define HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
-
-/* The bucket lock is selected based on the hash and protects mutations
- * on a group of hash buckets.
- *
- * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
- * a single lock always covers both buckets which may both contains
- * entries which link to the same bucket of the old table during resizing.
- * This allows to simplify the locking as locking the bucket in both
- * tables during resize always guarantee protection.
- *
- * IMPORTANT: When holding the bucket lock of both the old and new table
- * during expansions and shrinking, the old bucket lock must always be
- * acquired first.
- */
-static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
-{
- return &tbl->locks[hash & tbl->locks_mask];
-}
-
-static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
-{
- return (void *) he - ht->p.head_offset;
-}
-
-static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
-{
- return (hash >> HASH_RESERVED_SPACE) & (tbl->size - 1);
-}
-
-static u32 key_hashfn(struct rhashtable *ht, const struct bucket_table *tbl,
- const void *key)
-{
- return rht_bucket_index(tbl, ht->p.hashfn(key, ht->p.key_len,
- tbl->hash_rnd));
-}
-
static u32 head_hashfn(struct rhashtable *ht,
const struct bucket_table *tbl,
const struct rhash_head *he)
{
- const char *ptr = rht_obj(ht, he);
-
- return likely(ht->p.key_len) ?
- key_hashfn(ht, tbl, ptr + ht->p.key_offset) :
- rht_bucket_index(tbl, ht->p.obj_hashfn(ptr, tbl->hash_rnd));
+ return rht_head_hashfn(ht, tbl, he, ht->p);
}
#ifdef CONFIG_PROVE_LOCKING
@@ -90,7 +48,7 @@ EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
- spinlock_t *lock = bucket_lock(tbl, hash);
+ spinlock_t *lock = rht_bucket_lock(tbl, hash);
return (debug_locks) ? lockdep_is_held(lock) : 1;
}
@@ -178,32 +136,6 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
return tbl;
}
-/**
- * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
- * @ht: hash table
- * @tbl: current table
- */
-static bool rht_grow_above_75(const struct rhashtable *ht,
- const struct bucket_table *tbl)
-{
- /* Expand table when exceeding 75% load */
- return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
- (!ht->p.max_size || tbl->size < ht->p.max_size);
-}
-
-/**
- * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
- * @ht: hash table
- * @tbl: current table
- */
-static bool rht_shrink_below_30(const struct rhashtable *ht,
- const struct bucket_table *tbl)
-{
- /* Shrink table beneath 30% load */
- return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
- tbl->size > ht->p.min_size;
-}
-
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@@ -230,7 +162,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
new_hash = head_hashfn(ht, new_tbl, entry);
- new_bucket_lock = bucket_lock(new_tbl, new_hash);
+ new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);
spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
head = rht_dereference_bucket(new_tbl->buckets[new_hash],
@@ -255,7 +187,7 @@ static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
spinlock_t *old_bucket_lock;
- old_bucket_lock = bucket_lock(old_tbl, old_hash);
+ old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
spin_lock_bh(old_bucket_lock);
while (!rhashtable_rehash_one(ht, old_hash))
@@ -376,315 +308,36 @@ unlock:
mutex_unlock(&ht->mutex);
}
-static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
- bool (*compare)(void *, void *), void *arg)
+int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj,
+ struct bucket_table *tbl)
{
- struct bucket_table *tbl, *old_tbl;
struct rhash_head *head;
- bool no_resize_running;
unsigned hash;
- spinlock_t *old_lock;
- bool success = true;
-
- rcu_read_lock();
-
- old_tbl = rht_dereference_rcu(ht->tbl, ht);
- hash = head_hashfn(ht, old_tbl, obj);
- old_lock = bucket_lock(old_tbl, hash);
+ int err = -EEXIST;
- spin_lock_bh(old_lock);
-
- /* Because we have already taken the bucket lock in old_tbl,
- * if we find that future_tbl is not yet visible then that
- * guarantees all other insertions of the same entry will
- * also grab the bucket lock in old_tbl because until the
- * rehash completes ht->tbl won't be changed.
- */
- tbl = rht_dereference_rcu(old_tbl->future_tbl, ht) ?: old_tbl;
- if (tbl != old_tbl) {
- hash = head_hashfn(ht, tbl, obj);
- spin_lock_nested(bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
- }
+ hash = head_hashfn(ht, tbl, obj);
+ spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
- if (compare &&
- rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
- compare, arg)) {
- success = false;
+ if (key && rhashtable_lookup_fast(ht, key, ht->p))
goto exit;
- }
- no_resize_running = tbl == old_tbl;
+ err = 0;
head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
- if (rht_is_a_nulls(head))
- INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
- else
- RCU_INIT_POINTER(obj->next, head);
+ RCU_INIT_POINTER(obj->next, head);
rcu_assign_pointer(tbl->buckets[hash], obj);
atomic_inc(&ht->nelems);
- if (no_resize_running && rht_grow_above_75(ht, tbl))
- schedule_work(&ht->run_work);
exit:
- if (tbl != old_tbl)
- spin_unlock(bucket_lock(tbl, hash));
-
- spin_unlock_bh(old_lock);
-
- rcu_read_unlock();
-
- return success;
-}
-
-/**
- * rhashtable_insert - insert object into hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- *
- * Will take a per bucket spinlock to protect against mutual mutations
- * on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
- *
- * It is safe to call this function from atomic context.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
-{
- __rhashtable_insert(ht, obj, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(rhashtable_insert);
-
-static bool __rhashtable_remove(struct rhashtable *ht,
- struct bucket_table *tbl,
- struct rhash_head *obj)
-{
- struct rhash_head __rcu **pprev;
- struct rhash_head *he;
- spinlock_t * lock;
- unsigned hash;
- bool ret = false;
-
- hash = head_hashfn(ht, tbl, obj);
- lock = bucket_lock(tbl, hash);
-
- spin_lock_bh(lock);
-
- pprev = &tbl->buckets[hash];
- rht_for_each(he, tbl, hash) {
- if (he != obj) {
- pprev = &he->next;
- continue;
- }
-
- rcu_assign_pointer(*pprev, obj->next);
- ret = true;
- break;
- }
-
- spin_unlock_bh(lock);
-
- return ret;
-}
-
-/**
- * rhashtable_remove - remove object from hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- *
- * Since the hash chain is single linked, the removal operation needs to
- * walk the bucket chain upon removal. The removal operation is thus
- * considerable slow if the hash table is not correctly sized.
- *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
- *
- * The caller must ensure that no concurrent table mutations occur. It is
- * however valid to have concurrent lookups if they are RCU protected.
- */
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
-{
- struct bucket_table *tbl;
- bool ret;
-
- rcu_read_lock();
-
- tbl = rht_dereference_rcu(ht->tbl, ht);
-
- /* Because we have already taken (and released) the bucket
- * lock in old_tbl, if we find that future_tbl is not yet
- * visible then that guarantees the entry to still be in
- * the old tbl if it exists.
- */
- while (!(ret = __rhashtable_remove(ht, tbl, obj)) &&
- (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
- ;
-
- if (ret) {
- atomic_dec(&ht->nelems);
- if (rht_shrink_below_30(ht, tbl))
- schedule_work(&ht->run_work);
- }
-
- rcu_read_unlock();
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(rhashtable_remove);
-
-struct rhashtable_compare_arg {
- struct rhashtable *ht;
- const void *key;
-};
-
-static bool rhashtable_compare(void *ptr, void *arg)
-{
- struct rhashtable_compare_arg *x = arg;
- struct rhashtable *ht = x->ht;
-
- return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
-}
-
-/**
- * rhashtable_lookup - lookup key in hash table
- * @ht: hash table
- * @key: pointer to key
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
- *
- * This lookup function may only be used for fixed key hash table (key_len
- * parameter set). It will BUG() if used inappropriately.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- */
-void *rhashtable_lookup(struct rhashtable *ht, const void *key)
-{
- struct rhashtable_compare_arg arg = {
- .ht = ht,
- .key = key,
- };
-
- BUG_ON(!ht->p.key_len);
-
- return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup);
-
-/**
- * rhashtable_lookup_compare - search hash table with compare function
- * @ht: hash table
- * @key: the pointer to the key
- * @compare: compare function, must return true on match
- * @arg: argument passed on to compare function
- *
- * Traverses the bucket chain behind the provided hash value and calls the
- * specified compare function for each entry.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- *
- * Returns the first entry on which the compare function returned true.
- */
-void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
- bool (*compare)(void *, void *), void *arg)
-{
- const struct bucket_table *tbl;
- struct rhash_head *he;
- u32 hash;
-
- rcu_read_lock();
-
- tbl = rht_dereference_rcu(ht->tbl, ht);
-restart:
- hash = key_hashfn(ht, tbl, key);
- rht_for_each_rcu(he, tbl, hash) {
- if (!compare(rht_obj(ht, he), arg))
- continue;
- rcu_read_unlock();
- return rht_obj(ht, he);
- }
-
- /* Ensure we see any new tables. */
- smp_rmb();
-
- tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- if (unlikely(tbl))
- goto restart;
- rcu_read_unlock();
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
+ spin_unlock(rht_bucket_lock(tbl, hash));
-/**
- * rhashtable_lookup_insert - lookup and insert object into hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
- * This lookup function may only be used for fixed key hash table (key_len
- * parameter set). It will BUG() if used inappropriately.
- *
- * It is safe to call this function from atomic context.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
-{
- struct rhashtable_compare_arg arg = {
- .ht = ht,
- .key = rht_obj(ht, obj) + ht->p.key_offset,
- };
-
- BUG_ON(!ht->p.key_len);
-
- return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
- &arg);
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
-
-/**
- * rhashtable_lookup_compare_insert - search and insert object to hash table
- * with compare function
- * @ht: hash table
- * @obj: pointer to hash head inside object
- * @compare: compare function, must return true on match
- * @arg: argument passed on to compare function
- *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
- struct rhash_head *obj,
- bool (*compare)(void *, void *),
- void *arg)
-{
- BUG_ON(!ht->p.key_len);
-
- return __rhashtable_insert(ht, obj, compare, arg);
+ return err;
}
-EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
+EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
/**
* rhashtable_walk_init - Initialise an iterator
@@ -870,7 +523,7 @@ out:
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
-static size_t rounded_hashtable_size(struct rhashtable_params *params)
+static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
(unsigned long)params->min_size);
@@ -919,15 +572,16 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
* .obj_hashfn = my_hash_fn,
* };
*/
-int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
+int rhashtable_init(struct rhashtable *ht,
+ const struct rhashtable_params *params)
{
struct bucket_table *tbl;
size_t size;
size = HASH_DEFAULT_SIZE;
- if ((params->key_len && !params->hashfn) ||
- (!params->key_len && !params->obj_hashfn))
+ if ((!(params->key_len && params->hashfn) && !params->obj_hashfn) ||
+ (params->obj_hashfn && !params->obj_cmpfn))
return -EINVAL;
if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
@@ -946,7 +600,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
if (params->max_size)
ht->p.max_size = rounddown_pow_of_two(params->max_size);
- ht->p.min_size = max(params->min_size, HASH_MIN_SIZE);
+ ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
if (params->locks_mul)
ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
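
The tightened validation in rhashtable_init() above accepts two parameter shapes: a fixed-length key (key_len and hashfn both set), or a custom obj_hashfn, which must now be paired with obj_cmpfn. A hedged illustration of both shapes (demo_* names are hypothetical):

#include <linux/rhashtable.h>
#include <linux/jhash.h>

struct demo_entry {
	u32 key;
	struct rhash_head node;
};

/* Shape 1: fixed-length key -- key_len and hashfn must both be set;
 * hashing and comparison are derived from key_offset/key_len.
 */
static const struct rhashtable_params demo_fixed_key_params = {
	.head_offset = offsetof(struct demo_entry, node),
	.key_offset  = offsetof(struct demo_entry, key),
	.key_len     = sizeof(u32),
	.hashfn      = jhash,
};

/* Shape 2: custom object hash -- obj_hashfn must be paired with
 * obj_cmpfn, otherwise rhashtable_init() returns -EINVAL.
 * hashfn/key_len stay set so lookups can hash the bare key, and
 * obj_hashfn must produce the same value for the stored object.
 */
static u32 demo_obj_hash(const void *data, u32 seed)
{
	const struct demo_entry *e = data;

	return jhash(&e->key, sizeof(e->key), seed);
}

static int demo_obj_cmp(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct demo_entry *e = obj;

	return e->key != *(const u32 *)arg->key;	/* 0 means "match" */
}

static const struct rhashtable_params demo_obj_params = {
	.head_offset = offsetof(struct demo_entry, node),
	.key_offset  = offsetof(struct demo_entry, key),
	.key_len     = sizeof(u32),
	.hashfn      = jhash,
	.obj_hashfn  = demo_obj_hash,
	.obj_cmpfn   = demo_obj_cmp,
};
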
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 2bc403d8f9ee..a2ba6adb60a2 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -38,6 +38,16 @@ struct test_obj {
struct rhash_head node;
};
+static const struct rhashtable_params test_rht_params = {
+ .nelem_hint = TEST_HT_SIZE,
+ .head_offset = offsetof(struct test_obj, node),
+ .key_offset = offsetof(struct test_obj, value),
+ .key_len = sizeof(int),
+ .hashfn = jhash,
+ .max_size = 2, /* we expand/shrink manually here */
+ .nulls_base = (3U << RHT_BASE_SHIFT),
+};
+
static int __init test_rht_lookup(struct rhashtable *ht)
{
unsigned int i;
@@ -47,7 +57,7 @@ static int __init test_rht_lookup(struct rhashtable *ht)
bool expected = !(i % 2);
u32 key = i;
- obj = rhashtable_lookup(ht, &key);
+ obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
if (expected && !obj) {
pr_warn("Test failed: Could not find key %u\n", key);
@@ -133,7 +143,11 @@ static int __init test_rhashtable(struct rhashtable *ht)
obj->ptr = TEST_PTR;
obj->value = i * 2;
- rhashtable_insert(ht, &obj->node);
+ err = rhashtable_insert_fast(ht, &obj->node, test_rht_params);
+ if (err) {
+ kfree(obj);
+ goto error;
+ }
}
rcu_read_lock();
@@ -173,10 +187,10 @@ static int __init test_rhashtable(struct rhashtable *ht)
for (i = 0; i < TEST_ENTRIES; i++) {
u32 key = i * 2;
- obj = rhashtable_lookup(ht, &key);
+ obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
BUG_ON(!obj);
- rhashtable_remove(ht, &obj->node);
+ rhashtable_remove_fast(ht, &obj->node, test_rht_params);
kfree(obj);
}
@@ -195,20 +209,11 @@ static struct rhashtable ht;
static int __init test_rht_init(void)
{
- struct rhashtable_params params = {
- .nelem_hint = TEST_HT_SIZE,
- .head_offset = offsetof(struct test_obj, node),
- .key_offset = offsetof(struct test_obj, value),
- .key_len = sizeof(int),
- .hashfn = jhash,
- .max_size = 2, /* we expand/shrink manually here */
- .nulls_base = (3U << RHT_BASE_SHIFT),
- };
int err;
pr_info("Running resizable hashtable tests...\n");
- err = rhashtable_init(&ht, &params);
+ err = rhashtable_init(&ht, &test_rht_params);
if (err < 0) {
pr_warn("Test failed: Unable to initialize hashtable: %d\n",
err);
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index c82df0a48fcd..4585c5724391 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -29,6 +29,8 @@ struct nft_hash_elem {
struct nft_data data[];
};
+static const struct rhashtable_params nft_hash_params;
+
static bool nft_hash_lookup(const struct nft_set *set,
const struct nft_data *key,
struct nft_data *data)
@@ -36,7 +38,7 @@ static bool nft_hash_lookup(const struct nft_set *set,
struct rhashtable *priv = nft_set_priv(set);
const struct nft_hash_elem *he;
- he = rhashtable_lookup(priv, key);
+ he = rhashtable_lookup_fast(priv, key, nft_hash_params);
if (he && set->flags & NFT_SET_MAP)
nft_data_copy(data, he->data);
@@ -49,6 +51,7 @@ static int nft_hash_insert(const struct nft_set *set,
struct rhashtable *priv = nft_set_priv(set);
struct nft_hash_elem *he;
unsigned int size;
+ int err;
if (elem->flags != 0)
return -EINVAL;
@@ -65,9 +68,11 @@ static int nft_hash_insert(const struct nft_set *set,
if (set->flags & NFT_SET_MAP)
nft_data_copy(he->data, &elem->data);
- rhashtable_insert(priv, &he->node);
+ err = rhashtable_insert_fast(priv, &he->node, nft_hash_params);
+ if (err)
+ kfree(he);
- return 0;
+ return err;
}
static void nft_hash_elem_destroy(const struct nft_set *set,
@@ -84,46 +89,26 @@ static void nft_hash_remove(const struct nft_set *set,
{
struct rhashtable *priv = nft_set_priv(set);
- rhashtable_remove(priv, elem->cookie);
+ rhashtable_remove_fast(priv, elem->cookie, nft_hash_params);
synchronize_rcu();
kfree(elem->cookie);
}
-struct nft_compare_arg {
- const struct nft_set *set;
- struct nft_set_elem *elem;
-};
-
-static bool nft_hash_compare(void *ptr, void *arg)
-{
- struct nft_hash_elem *he = ptr;
- struct nft_compare_arg *x = arg;
-
- if (!nft_data_cmp(&he->key, &x->elem->key, x->set->klen)) {
- x->elem->cookie = he;
- x->elem->flags = 0;
- if (x->set->flags & NFT_SET_MAP)
- nft_data_copy(&x->elem->data, he->data);
-
- return true;
- }
-
- return false;
-}
-
static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
{
struct rhashtable *priv = nft_set_priv(set);
- struct nft_compare_arg arg = {
- .set = set,
- .elem = elem,
- };
+ struct nft_hash_elem *he;
+
+ he = rhashtable_lookup_fast(priv, &elem->key, nft_hash_params);
+ if (!he)
+ return -ENOENT;
- if (rhashtable_lookup_compare(priv, &elem->key,
- &nft_hash_compare, &arg))
- return 0;
+ elem->cookie = he;
+ elem->flags = 0;
+ if (set->flags & NFT_SET_MAP)
+ nft_data_copy(&elem->data, he->data);
- return -ENOENT;
+ return 0;
}
static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
@@ -181,18 +166,21 @@ static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
return sizeof(struct rhashtable);
}
+static const struct rhashtable_params nft_hash_params = {
+ .head_offset = offsetof(struct nft_hash_elem, node),
+ .key_offset = offsetof(struct nft_hash_elem, key),
+ .hashfn = jhash,
+};
+
static int nft_hash_init(const struct nft_set *set,
const struct nft_set_desc *desc,
const struct nlattr * const tb[])
{
struct rhashtable *priv = nft_set_priv(set);
- struct rhashtable_params params = {
- .nelem_hint = desc->size ? : NFT_HASH_ELEMENT_HINT,
- .head_offset = offsetof(struct nft_hash_elem, node),
- .key_offset = offsetof(struct nft_hash_elem, key),
- .key_len = set->klen,
- .hashfn = jhash,
- };
+ struct rhashtable_params params = nft_hash_params;
+
+ params.nelem_hint = desc->size ?: NFT_HASH_ELEMENT_HINT;
+ params.key_len = set->klen;
return rhashtable_init(priv, &params);
}
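
The nft_hash conversion above relies on a useful pattern when part of the configuration is only known at runtime: the inlined _fast helpers reference a constant template, and the init path copies that template and fills in the runtime fields before calling rhashtable_init(). A condensed sketch of the same idea (hypothetical names):

#include <linux/rhashtable.h>
#include <linux/jhash.h>

struct demo_elem {
	struct rhash_head node;
	u8 key[];		/* key whose length is only known at runtime */
};

/* Template referenced by the inlined _fast helpers; key_len is left at 0
 * so they fall back to ht->p.key_len (see rht_key_hashfn() above).
 */
static const struct rhashtable_params demo_tmpl_params = {
	.head_offset = offsetof(struct demo_elem, node),
	.key_offset  = offsetof(struct demo_elem, key),
	.hashfn      = jhash,
};

static int demo_table_init(struct rhashtable *ht, unsigned int klen)
{
	struct rhashtable_params params = demo_tmpl_params;

	params.key_len = klen;		/* fill in the runtime-only field */

	return rhashtable_init(ht, &params);
}
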
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d97aed628bda..72c6b55af741 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -116,6 +116,8 @@ static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;
+static const struct rhashtable_params netlink_rhashtable_params;
+
static inline u32 netlink_group_mask(u32 group)
{
return group ? 1 << (group - 1) : 0;
@@ -970,41 +972,49 @@ netlink_unlock_table(void)
struct netlink_compare_arg
{
- struct net *net;
+ possible_net_t pnet;
u32 portid;
+ char trailer[];
};
-static bool netlink_compare(void *ptr, void *arg)
+#define netlink_compare_arg_len offsetof(struct netlink_compare_arg, trailer)
+
+static inline int netlink_compare(struct rhashtable_compare_arg *arg,
+ const void *ptr)
{
- struct netlink_compare_arg *x = arg;
- struct sock *sk = ptr;
+ const struct netlink_compare_arg *x = arg->key;
+ const struct netlink_sock *nlk = ptr;
- return nlk_sk(sk)->portid == x->portid &&
- net_eq(sock_net(sk), x->net);
+ return nlk->portid != x->portid ||
+ !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
+}
+
+static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
+ struct net *net, u32 portid)
+{
+ memset(arg, 0, sizeof(*arg));
+ write_pnet(&arg->pnet, net);
+ arg->portid = portid;
}
static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
struct net *net)
{
- struct netlink_compare_arg arg = {
- .net = net,
- .portid = portid,
- };
+ struct netlink_compare_arg arg;
- return rhashtable_lookup_compare(&table->hash, &portid,
- &netlink_compare, &arg);
+ netlink_compare_arg_init(&arg, net, portid);
+ return rhashtable_lookup_fast(&table->hash, &arg,
+ netlink_rhashtable_params);
}
-static bool __netlink_insert(struct netlink_table *table, struct sock *sk)
+static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
- struct netlink_compare_arg arg = {
- .net = sock_net(sk),
- .portid = nlk_sk(sk)->portid,
- };
+ struct netlink_compare_arg arg;
- return rhashtable_lookup_compare_insert(&table->hash,
- &nlk_sk(sk)->node,
- &netlink_compare, &arg);
+ netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
+ return rhashtable_lookup_insert_key(&table->hash, &arg,
+ &nlk_sk(sk)->node,
+ netlink_rhashtable_params);
}
static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
@@ -1066,9 +1076,10 @@ static int netlink_insert(struct sock *sk, u32 portid)
nlk_sk(sk)->portid = portid;
sock_hold(sk);
- err = 0;
- if (!__netlink_insert(table, sk)) {
- err = -EADDRINUSE;
+ err = __netlink_insert(table, sk);
+ if (err) {
+ if (err == -EEXIST)
+ err = -EADDRINUSE;
sock_put(sk);
}
@@ -1082,7 +1093,8 @@ static void netlink_remove(struct sock *sk)
struct netlink_table *table;
table = &nl_table[sk->sk_protocol];
- if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
+ if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
+ netlink_rhashtable_params)) {
WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
__sock_put(sk);
}
@@ -3114,17 +3126,28 @@ static struct pernet_operations __net_initdata netlink_net_ops = {
.exit = netlink_net_exit,
};
+static inline u32 netlink_hash(const void *data, u32 seed)
+{
+ const struct netlink_sock *nlk = data;
+ struct netlink_compare_arg arg;
+
+ netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
+ return jhash(&arg, netlink_compare_arg_len, seed);
+}
+
+static const struct rhashtable_params netlink_rhashtable_params = {
+ .head_offset = offsetof(struct netlink_sock, node),
+ .key_len = netlink_compare_arg_len,
+ .hashfn = jhash,
+ .obj_hashfn = netlink_hash,
+ .obj_cmpfn = netlink_compare,
+ .max_size = 65536,
+};
+
static int __init netlink_proto_init(void)
{
int i;
int err = proto_register(&netlink_proto, 0);
- struct rhashtable_params ht_params = {
- .head_offset = offsetof(struct netlink_sock, node),
- .key_offset = offsetof(struct netlink_sock, portid),
- .key_len = sizeof(u32), /* portid */
- .hashfn = jhash,
- .max_size = 65536,
- };
if (err != 0)
goto out;
@@ -3136,7 +3159,8 @@ static int __init netlink_proto_init(void)
goto panic;
for (i = 0; i < MAX_LINKS; i++) {
- if (rhashtable_init(&nl_table[i].hash, &ht_params) < 0) {
+ if (rhashtable_init(&nl_table[i].hash,
+ &netlink_rhashtable_params) < 0) {
while (--i > 0)
rhashtable_destroy(&nl_table[i].hash);
kfree(nl_table);
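
The netlink conversion above demonstrates composite keys under the new interface: the key is a small struct that is zeroed before being filled in so padding does not perturb jhash, and obj_hashfn rebuilds the same struct from the object so insert-time and lookup-time hashes agree. A stripped-down sketch of the same trick for a hypothetical two-part key (demo_* names are not part of the patch):

#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <linux/string.h>

struct demo_sock {
	u32 portid;
	u32 group;
	struct rhash_head node;
};

struct demo_key {
	u32 portid;
	u32 group;
	char trailer[];
};

#define demo_key_len offsetof(struct demo_key, trailer)

static void demo_key_init(struct demo_key *k, u32 portid, u32 group)
{
	memset(k, 0, sizeof(*k));	/* zero any padding so jhash is stable */
	k->portid = portid;
	k->group = group;
}

static u32 demo_sock_hash(const void *data, u32 seed)
{
	const struct demo_sock *s = data;
	struct demo_key k;

	demo_key_init(&k, s->portid, s->group);
	return jhash(&k, demo_key_len, seed);	/* must match hashfn(key) */
}

static int demo_sock_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct demo_key *k = arg->key;
	const struct demo_sock *s = ptr;

	return s->portid != k->portid || s->group != k->group;
}

static const struct rhashtable_params demo_sock_params = {
	.head_offset = offsetof(struct demo_sock, node),
	.key_len     = demo_key_len,
	.hashfn      = jhash,
	.obj_hashfn  = demo_sock_hash,
	.obj_cmpfn   = demo_sock_cmp,
};

static struct demo_sock *demo_lookup(struct rhashtable *ht, u32 portid, u32 group)
{
	struct demo_key k;

	demo_key_init(&k, portid, group);
	return rhashtable_lookup_fast(ht, &k, demo_sock_params);
}
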
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index c03a3d33806f..73c2f518a7c0 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -133,6 +133,8 @@ static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
[TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG }
};
+static const struct rhashtable_params tsk_rht_params;
+
/*
* Revised TIPC socket locking policy:
*
@@ -2245,7 +2247,7 @@ static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
struct tipc_sock *tsk;
rcu_read_lock();
- tsk = rhashtable_lookup(&tn->sk_rht, &portid);
+ tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
if (tsk)
sock_hold(&tsk->sk);
rcu_read_unlock();
@@ -2267,7 +2269,8 @@ static int tipc_sk_insert(struct tipc_sock *tsk)
portid = TIPC_MIN_PORT;
tsk->portid = portid;
sock_hold(&tsk->sk);
- if (rhashtable_lookup_insert(&tn->sk_rht, &tsk->node))
+ if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
+ tsk_rht_params))
return 0;
sock_put(&tsk->sk);
}
@@ -2280,26 +2283,27 @@ static void tipc_sk_remove(struct tipc_sock *tsk)
struct sock *sk = &tsk->sk;
struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
- if (rhashtable_remove(&tn->sk_rht, &tsk->node)) {
+ if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
__sock_put(sk);
}
}
+static const struct rhashtable_params tsk_rht_params = {
+ .nelem_hint = 192,
+ .head_offset = offsetof(struct tipc_sock, node),
+ .key_offset = offsetof(struct tipc_sock, portid),
+ .key_len = sizeof(u32), /* portid */
+ .hashfn = jhash,
+ .max_size = 1048576,
+ .min_size = 256,
+};
+
int tipc_sk_rht_init(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct rhashtable_params rht_params = {
- .nelem_hint = 192,
- .head_offset = offsetof(struct tipc_sock, node),
- .key_offset = offsetof(struct tipc_sock, portid),
- .key_len = sizeof(u32), /* portid */
- .hashfn = jhash,
- .max_size = 1048576,
- .min_size = 256,
- };
-
- return rhashtable_init(&tn->sk_rht, &rht_params);
+
+ return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
}
void tipc_sk_rht_destroy(struct net *net)
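
Finally, since rhashtable_init() now takes a const struct rhashtable_params pointer, converted users such as tipc and netlink keep a single file-scope constant parameter block and hand it both to rhashtable_init() and to every inlined _fast call. A hedged end-to-end sketch of that lifecycle (hypothetical module and names):

#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/jhash.h>

struct demo_obj {
	u32 key;
	struct rhash_head node;
};

static struct rhashtable demo_ht;

static const struct rhashtable_params demo_ht_params = {
	.nelem_hint  = 192,
	.head_offset = offsetof(struct demo_obj, node),
	.key_offset  = offsetof(struct demo_obj, key),
	.key_len     = sizeof(u32),
	.hashfn      = jhash,
	.max_size    = 1048576,
	.min_size    = 256,
};

static int __init demo_init(void)
{
	/* The same constant block is also passed to the _fast helpers. */
	return rhashtable_init(&demo_ht, &demo_ht_params);
}

static void __exit demo_exit(void)
{
	rhashtable_destroy(&demo_ht);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");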