author    Mauro Carvalho Chehab <mchehab+samsung@kernel.org>  2019-05-28 18:21:51 +0300
committer Mauro Carvalho Chehab <mchehab+samsung@kernel.org>  2019-05-28 18:21:51 +0300
commit    1e0566fd4a81bac04aed6af818e6938e6c71d389 (patch)
tree      38df614f2951905d48f744c1a9393afd3b34ce4e /lib/rhashtable.c
parent    2c41cc0be07b5ee2f1167f41cd8a86fc5b53d82c (diff)
parent    cd6c84d8f0cdc911df435bb075ba22ce3c605b07 (diff)
download  linux-1e0566fd4a81bac04aed6af818e6938e6c71d389.tar.xz
Merge tag 'v5.2-rc2' into patchwork
Merge back from upstream into the media tree, as some patches merged upstream have the potential to cause conflicts (one has already raised a conflict).

Linux 5.2-rc2

* tag 'v5.2-rc2': (377 commits)
  Linux 5.2-rc2
  random: fix soft lockup when trying to read from an uninitialized blocking pool
  tracing: Silence GCC 9 array bounds warning
  ext4: fix dcache lookup of !casefolded directories
  locking/lock_events: Use this_cpu_add() when necessary
  KVM: x86: fix return value for reserved EFER
  tools/kvm_stat: fix fields filter for child events
  KVM: selftests: Wrap vcpu_nested_state_get/set functions with x86 guard
  kvm: selftests: aarch64: compile with warnings on
  kvm: selftests: aarch64: fix default vm mode
  kvm: selftests: aarch64: dirty_log_test: fix unaligned memslot size
  KVM: s390: fix memory slot handling for KVM_SET_USER_MEMORY_REGION
  KVM: x86/pmu: do not mask the value that is written to fixed PMUs
  KVM: x86/pmu: mask the result of rdpmc according to the width of the counters
  x86/kvm/pmu: Set AMD's virt PMU version to 1
  KVM: x86: do not spam dmesg with VMCS/VMCB dumps
  kvm: Check irqchip mode before assign irqfd
  kvm: svm/avic: fix off-by-one in checking host APIC ID
  KVM: selftests: do not blindly clobber registers in guest asm
  KVM: selftests: Remove duplicated TEST_ASSERT in hyperv_cpuid.c
  ...
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c  33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6529fe1b45c1..935ec80f213f 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -34,7 +34,7 @@
union nested_table {
union nested_table __rcu *table;
- struct rhash_lock_head __rcu *bucket;
+ struct rhash_lock_head *bucket;
};
static u32 head_hashfn(struct rhashtable *ht,
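For context on what this first hunk changes: `__rcu` is a Sparse-only annotation, so dropping it relaxes static checking rather than altering generated code. Below is a rough, standalone illustration of how the marker is commonly defined and used; the address-space number and the `struct slot`/`struct bucket` names are assumptions for illustration, not taken from this file.

```c
/* Under a Sparse run (__CHECKER__ defined) the marker puts the pointer
 * in a separate address space so direct dereferences are flagged; with
 * a normal compiler it expands to nothing and changes no code.
 * (Assumed simplified definition, modeled on common kernel headers.) */
#ifdef __CHECKER__
# define __rcu __attribute__((noderef, address_space(4)))
#else
# define __rcu
#endif

struct bucket {
	int key;
};

/* With the marker, only RCU accessors are supposed to touch 'head';
 * a raw cmpxchg() on such a pointer makes Sparse warn, which is why
 * the hunks below add explicit casts where the marker was removed. */
struct slot {
	struct bucket __rcu *head;
};

int main(void)
{
	struct slot s = { .head = 0 };
	(void)s;
	return 0;
}
```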
@@ -131,7 +131,7 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
}
- if (cmpxchg(prev, NULL, ntbl) == NULL)
+ if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
return ntbl;
/* Raced with another thread. */
kfree(ntbl);
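The hunk above keeps the lockless installation pattern of nested_table_alloc(): allocate a candidate table, publish it with a single compare-and-swap, and free it if another thread won the race. Here is a minimal userspace analogue with C11 atomics; `install_once` and `struct table` are hypothetical names, not kernel code.

```c
#include <stdatomic.h>
#include <stdlib.h>

struct table {
	void *slots[64];
};

/* Install a freshly allocated table into *slot exactly once, even when
 * several threads race here; mirrors the cmpxchg(prev, NULL, ntbl)
 * pattern in the hunk above (allocate, try to publish, free on loss). */
static struct table *install_once(struct table *_Atomic *slot)
{
	struct table *cur = atomic_load(slot);
	struct table *ntbl;

	if (cur)		/* someone already installed a table */
		return cur;

	ntbl = calloc(1, sizeof(*ntbl));
	if (!ntbl)
		return NULL;

	cur = NULL;
	if (atomic_compare_exchange_strong(slot, &cur, ntbl))
		return ntbl;	/* we won the race; our table is live */

	free(ntbl);		/* raced with another thread */
	return cur;		/* use the winner's table instead */
}

int main(void)
{
	static struct table *_Atomic slot;
	return install_once(&slot) ? 0 : 1;	/* demo only; table leaks */
}
```

The same publish-once idiom appears again further down, where rhashtable_rehash_attach() installs `future_tbl` with a cmpxchg() against NULL.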
@@ -216,7 +216,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
}
static int rhashtable_rehash_one(struct rhashtable *ht,
- struct rhash_lock_head __rcu **bkt,
+ struct rhash_lock_head **bkt,
unsigned int old_hash)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@@ -269,7 +269,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
unsigned int old_hash)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
- struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
+ struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
int err;
if (!bkt)
@@ -296,7 +296,8 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
* rcu_assign_pointer().
*/
- if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
+ if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
+ new_tbl) != NULL)
return -EEXIST;
return 0;
@@ -478,7 +479,7 @@ fail:
}
static void *rhashtable_lookup_one(struct rhashtable *ht,
- struct rhash_lock_head __rcu **bkt,
+ struct rhash_lock_head **bkt,
struct bucket_table *tbl, unsigned int hash,
const void *key, struct rhash_head *obj)
{
@@ -529,7 +530,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
}
static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
- struct rhash_lock_head __rcu **bkt,
+ struct rhash_lock_head **bkt,
struct bucket_table *tbl,
unsigned int hash,
struct rhash_head *obj,
@@ -584,7 +585,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
{
struct bucket_table *new_tbl;
struct bucket_table *tbl;
- struct rhash_lock_head __rcu **bkt;
+ struct rhash_lock_head **bkt;
unsigned int hash;
void *data;
@@ -1166,8 +1167,8 @@ void rhashtable_destroy(struct rhashtable *ht)
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
-struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
- unsigned int hash)
+struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
+ unsigned int hash)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
unsigned int index = hash & ((1 << tbl->nest) - 1);
@@ -1195,10 +1196,10 @@ struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tb
}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);
-struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
- unsigned int hash)
+struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
+ unsigned int hash)
{
- static struct rhash_lock_head __rcu *rhnull;
+ static struct rhash_lock_head *rhnull;
if (!rhnull)
INIT_RHT_NULLS_HEAD(rhnull);
@@ -1206,9 +1207,9 @@ struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);
-struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
- struct bucket_table *tbl,
- unsigned int hash)
+struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
+ struct bucket_table *tbl,
+ unsigned int hash)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
unsigned int index = hash & ((1 << tbl->nest) - 1);
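The nested-bucket helpers in the last hunks all derive their indices the same way: a page holds `1 << (PAGE_SHIFT - ilog2(sizeof(void *)))` pointers, the low `tbl->nest` bits of the hash pick the slot in the top-level page, and each deeper level consumes another page's worth of bits. A standalone sketch of that index arithmetic follows; the constants, `nest` value, and example hash are made-up inputs mirroring the shift/index lines above.

```c
#include <stdio.h>

/* Hypothetical parameters: 4 KiB pages and 8-byte pointers give
 * 512 pointers per page, i.e. a per-level shift of 9
 * (PAGE_SHIFT - ilog2(sizeof(void *))). */
#define PAGE_SHIFT	12
#define PTR_SHIFT	3			/* ilog2(sizeof(void *)) */
#define LEVEL_SHIFT	(PAGE_SHIFT - PTR_SHIFT)

/* Slice a bucket hash into per-level table indices the way
 * __rht_bucket_nested() walks a nested table: low 'nest' bits select
 * the slot in the top page, further levels each take LEVEL_SHIFT bits. */
int main(void)
{
	unsigned int nest = 4;			/* example top-level shift */
	unsigned int hash = 0x2a5f3;		/* example bucket hash */

	unsigned int index = hash & ((1u << nest) - 1);
	printf("top-level slot:    %u\n", index);
	hash >>= nest;

	index = hash & ((1u << LEVEL_SHIFT) - 1);
	printf("second-level slot: %u\n", index);
	return 0;
}
```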