Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig            3
-rw-r--r--  lib/Kconfig.debug     11
-rw-r--r--  lib/assoc_array.c      6
-rw-r--r--  lib/genalloc.c         1
-rw-r--r--  lib/hweight.c          4
-rw-r--r--  lib/percpu-refcount.c 16
-rw-r--r--  lib/rhashtable.c       1
-rw-r--r--  lib/string.c           4
8 files changed, 38 insertions, 8 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index a5ce0c7f6c30..54cf309a92a5 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -51,6 +51,9 @@ config PERCPU_RWSEM
config ARCH_USE_CMPXCHG_LOCKREF
bool
+config ARCH_HAS_FAST_MULTIPLIER
+ bool
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 07c28323f88f..a28590083622 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -892,6 +892,10 @@ config DEBUG_WW_MUTEX_SLOWPATH
the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
will test all possible w/w mutex interface abuse with the
exception of simply not acquiring all the required locks.
+ Note that this feature can introduce significant overhead, so
+ it really should not be enabled in a production or distro kernel,
+ even a debug kernel. If you are a driver writer, enable it. If
+ you are a distro, do not.
config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
@@ -1032,8 +1036,13 @@ config TRACE_IRQFLAGS
either tracing or lock debugging.
config STACKTRACE
- bool
+ bool "Stack backtrace support"
depends on STACKTRACE_SUPPORT
+ help
+ This option causes the kernel to create a /proc/pid/stack for
+ every process, showing its current stack trace.
+ It is also used by various kernel debugging features that require
+ stack trace generation.
config DEBUG_KOBJECT
bool "kobject debugging"
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index c0b1007011e1..2404d03e251a 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -1723,11 +1723,13 @@ ascend_old_tree:
shortcut = assoc_array_ptr_to_shortcut(ptr);
slot = shortcut->parent_slot;
cursor = shortcut->back_pointer;
+ if (!cursor)
+ goto gc_complete;
} else {
slot = node->parent_slot;
cursor = ptr;
}
- BUG_ON(!ptr);
+ BUG_ON(!cursor);
node = assoc_array_ptr_to_node(cursor);
slot++;
goto continue_node;
@@ -1735,7 +1737,7 @@ ascend_old_tree:
gc_complete:
edit->set[0].to = new_root;
assoc_array_apply_edit(edit);
- edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
+ array->nr_leaves_on_tree = nr_leaves_on_tree;
return 0;
enomem:
diff --git a/lib/genalloc.c b/lib/genalloc.c
index bdb9a456bcbb..38d2db82228c 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -588,6 +588,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np,
if (!np_pool)
return NULL;
pdev = of_find_device_by_node(np_pool);
+ of_node_put(np_pool);
if (!pdev)
return NULL;
return dev_get_gen_pool(&pdev->dev);
diff --git a/lib/hweight.c b/lib/hweight.c
index b7d81ba143d1..9a5c1f221558 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -11,7 +11,7 @@
unsigned int __sw_hweight32(unsigned int w)
{
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x55555555;
w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
w = (w + (w >> 4)) & 0x0f0f0f0f;
@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w)
return __sw_hweight32((unsigned int)(w >> 32)) +
__sw_hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x5555555555555555ul;
w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
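The three lines of context above are the bit-folding steps; the point of CONFIG_ARCH_HAS_FAST_MULTIPLIER is the step that follows them, where architectures with a fast multiplier sum the per-byte counts with a single multiply rather than more shift-and-add folding. A standalone userspace sketch of that multiplier-based 32-bit population count (illustrative only, not the kernel's __sw_hweight32):

#include <assert.h>
#include <stdint.h>

/* Multiplier-based popcount: fold bits into per-byte counts, then let one
 * multiply by 0x01010101 add the four bytes together in the top byte. */
static unsigned int popcount32_mult(uint32_t w)
{
	w -= (w >> 1) & 0x55555555;                      /* 2-bit counts */
	w  = (w & 0x33333333) + ((w >> 2) & 0x33333333); /* 4-bit counts */
	w  = (w + (w >> 4)) & 0x0f0f0f0f;                /* per-byte counts */
	return (w * 0x01010101) >> 24;                   /* sum of the bytes */
}

int main(void)
{
	assert(popcount32_mult(0x00000000) == 0);
	assert(popcount32_mult(0x80000001) == 2);
	assert(popcount32_mult(0xffffffff) == 32);
	return 0;
}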
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index fe5a3342e960..a89cf09a8268 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -184,3 +184,19 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
+
+/*
+ * XXX: Temporary kludge to work around SCSI blk-mq stall. Used only by
+ * block/blk-mq.c::blk_mq_freeze_queue(). Will be removed during v3.18
+ * devel cycle. Do not use anywhere else.
+ */
+void __percpu_ref_kill_expedited(struct percpu_ref *ref)
+{
+ WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
+ "percpu_ref_kill() called more than once on %pf!",
+ ref->release);
+
+ ref->pcpu_count_ptr |= PCPU_REF_DEAD;
+ synchronize_sched_expedited();
+ percpu_ref_kill_rcu(&ref->rcu);
+}
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a2c78810ebc1..7b36e4d40ed7 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -23,7 +23,6 @@
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
-#include <linux/log2.h>
#define HASH_DEFAULT_SIZE 64UL
#define HASH_MIN_SIZE 4UL
diff --git a/lib/string.c b/lib/string.c
index 992bf30af759..f3c6ff596414 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -807,9 +807,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
return check_bytes8(start, value, bytes);
value64 = value;
-#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
value64 *= 0x0101010101010101;
-#elif defined(ARCH_HAS_FAST_MULTIPLIER)
+#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER)
value64 *= 0x01010101;
value64 |= value64 << 32;
#else
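memchr_inv() uses the multiplier the other way round: multiplying the byte value by 0x0101010101010101 replicates it into every byte of a 64-bit word so the buffer can be scanned a word at a time, while the #else branch builds the same pattern with shifts and ORs. A small standalone sketch of the broadcast step (the FAST_MULTIPLIER macro here is a stand-in, not the kernel's config symbol):

#include <assert.h>
#include <stdint.h>

/* Replicate one byte into all eight bytes of a 64-bit word: a single
 * multiply when a fast multiplier is available, shifts and ORs otherwise. */
static uint64_t broadcast_byte(uint8_t c)
{
	uint64_t v = c;

#ifdef FAST_MULTIPLIER
	v *= 0x0101010101010101ULL;
#else
	v |= v << 8;
	v |= v << 16;
	v |= v << 32;
#endif
	return v;
}

int main(void)
{
	assert(broadcast_byte(0xab) == 0xababababababababULL);
	assert(broadcast_byte(0x00) == 0);
	return 0;
}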