Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                  |   3
-rw-r--r--  lib/Kconfig.debug            |  25
-rw-r--r--  lib/Makefile                 |   7
-rw-r--r--  lib/argv_split.c             |  87
-rw-r--r--  lib/bust_spinlocks.c         |   3
-rw-r--r--  lib/decompress.c             |   2
-rw-r--r--  lib/dma-debug.c              |  45
-rw-r--r--  lib/dump_stack.c             |  11
-rw-r--r--  lib/dynamic_debug.c          |  49
-rw-r--r--  lib/fault-inject.c           |   2
-rw-r--r--  lib/genalloc.c               |  81
-rw-r--r--  lib/idr.c                    | 107
-rw-r--r--  lib/int_sqrt.c               |  32
-rw-r--r--  lib/kobject.c                |   9
-rw-r--r--  lib/list_sort.c              |   2
-rw-r--r--  lib/notifier-error-inject.c  |   4
-rw-r--r--  lib/oid_registry.c           |   5
-rw-r--r--  lib/rbtree_test.c            |   9
-rw-r--r--  lib/rwsem-spinlock.c         |  38
-rw-r--r--  lib/rwsem.c                  | 240
-rw-r--r--  lib/scatterlist.c            |   4
-rw-r--r--  lib/show_mem.c               |   3
-rw-r--r--  lib/string_helpers.c         | 133
-rw-r--r--  lib/swiotlb.c                |  19
-rw-r--r--  lib/test-string_helpers.c    | 103
-rw-r--r--  lib/ucs2_string.c            |  51
-rw-r--r--  lib/usercopy.c               |   9
-rw-r--r--  lib/uuid.c                   |   8
-rw-r--r--  lib/vsprintf.c               |  18
-rw-r--r--  lib/xz/Kconfig               |   2
30 files changed, 759 insertions, 352 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 3958dc4389f9..fe01d418b09a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -404,4 +404,7 @@ config OID_REGISTRY
help
Enable fast lookup object identifier registry.
+config UCS2_STRING
+ tristate
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 28be08c09bab..566cf2bc08ea 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1192,7 +1192,7 @@ config MEMORY_NOTIFIER_ERROR_INJECT
bash: echo: write error: Cannot allocate memory
To compile this code as a module, choose M here: the module will
- be called pSeries-reconfig-notifier-error-inject.
+ be called memory-notifier-error-inject.
If unsure, say N.
@@ -1209,7 +1209,7 @@ config OF_RECONFIG_NOTIFIER_ERROR_INJECT
notified, write the error code to "actions/<notifier event>/error".
To compile this code as a module, choose M here: the module will
- be called memory-notifier-error-inject.
+ be called of-reconfig-notifier-error-inject.
If unsure, say N.
@@ -1292,6 +1292,24 @@ config LATENCYTOP
Enable this option if you want to use the LatencyTOP tool
to find out which userspace is blocking on what kernel operations.
+config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+ bool
+
+config DEBUG_STRICT_USER_COPY_CHECKS
+ bool "Strict user copy size checks"
+ depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
+ help
+ Enabling this option turns a certain set of sanity checks for user
+ copy operations into compile time failures.
+
+ The copy_from_user() etc checks are there to help test if there
+ are sufficient security checks on the length argument of
+ the copy operation, by having gcc prove that the argument is
+ within bounds.
+
+ If unsure, say N.
+
source mm/Kconfig.debug
source kernel/trace/Kconfig
@@ -1463,5 +1481,8 @@ source "lib/Kconfig.kgdb"
source "lib/Kconfig.kmemcheck"
+config TEST_STRING_HELPERS
+ tristate "Test functions located in the string_helpers module at runtime"
+
config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
diff --git a/lib/Makefile b/lib/Makefile
index d7946ff75b2e..e9c52e1b853a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -15,6 +15,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o
+obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
@@ -22,8 +23,10 @@ lib-y += kobject.o klist.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
- string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
+ gcd.o lcm.o list_sort.o uuid.o flex_array.o \
bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
+obj-y += string_helpers.o
+obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += kstrtox.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
@@ -174,3 +177,5 @@ quiet_cmd_build_OID_registry = GEN $@
cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@
clean-files += oid_registry_data.c
+
+obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 1e9a6cbc3689..e927ed0e18a8 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -8,23 +8,17 @@
#include <linux/slab.h>
#include <linux/export.h>
-static const char *skip_arg(const char *cp)
-{
- while (*cp && !isspace(*cp))
- cp++;
-
- return cp;
-}
-
static int count_argc(const char *str)
{
int count = 0;
+ bool was_space;
- while (*str) {
- str = skip_spaces(str);
- if (*str) {
+ for (was_space = true; *str; str++) {
+ if (isspace(*str)) {
+ was_space = true;
+ } else if (was_space) {
+ was_space = false;
count++;
- str = skip_arg(str);
}
}
@@ -39,10 +33,8 @@ static int count_argc(const char *str)
*/
void argv_free(char **argv)
{
- char **p;
- for (p = argv; *p; p++)
- kfree(*p);
-
+ argv--;
+ kfree(argv[0]);
kfree(argv);
}
EXPORT_SYMBOL(argv_free);
@@ -59,43 +51,44 @@ EXPORT_SYMBOL(argv_free);
* considered to be a single argument separator. The returned array
* is always NULL-terminated. Returns NULL on memory allocation
* failure.
+ *
+ * The source string at `str' may be undergoing concurrent alteration via
+ * userspace sysctl activity (at least). The argv_split() implementation
+ * attempts to handle this gracefully by taking a local copy to work on.
*/
char **argv_split(gfp_t gfp, const char *str, int *argcp)
{
- int argc = count_argc(str);
- char **argv = kzalloc(sizeof(*argv) * (argc+1), gfp);
- char **argvp;
-
- if (argv == NULL)
- goto out;
-
- if (argcp)
- *argcp = argc;
-
- argvp = argv;
-
- while (*str) {
- str = skip_spaces(str);
-
- if (*str) {
- const char *p = str;
- char *t;
-
- str = skip_arg(str);
+ char *argv_str;
+ bool was_space;
+ char **argv, **argv_ret;
+ int argc;
+
+ argv_str = kstrndup(str, KMALLOC_MAX_SIZE - 1, gfp);
+ if (!argv_str)
+ return NULL;
+
+ argc = count_argc(argv_str);
+ argv = kmalloc(sizeof(*argv) * (argc + 2), gfp);
+ if (!argv) {
+ kfree(argv_str);
+ return NULL;
+ }
- t = kstrndup(p, str-p, gfp);
- if (t == NULL)
- goto fail;
- *argvp++ = t;
+ *argv = argv_str;
+ argv_ret = ++argv;
+ for (was_space = true; *argv_str; argv_str++) {
+ if (isspace(*argv_str)) {
+ was_space = true;
+ *argv_str = 0;
+ } else if (was_space) {
+ was_space = false;
+ *argv++ = argv_str;
}
}
- *argvp = NULL;
-
- out:
- return argv;
+ *argv = NULL;
- fail:
- argv_free(argv);
- return NULL;
+ if (argcp)
+ *argcp = argc;
+ return argv_ret;
}
EXPORT_SYMBOL(argv_split);
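Note on the reworked interface above: the split now lives in a single kstrndup() copy, with argv[-1] holding the pointer to that copy, so argv_free() releases everything in two kfree() calls instead of one per word. A minimal, hypothetical caller for illustration (names are made up; argv_split() and argv_free() are declared in <linux/string.h>):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>	/* argv_split(), argv_free() */

static int foo_run(const char *cmdline)
{
	char **argv;
	int argc, i;

	argv = argv_split(GFP_KERNEL, cmdline, &argc);
	if (!argv)
		return -ENOMEM;

	for (i = 0; i < argc; i++)
		pr_info("argv[%d] = %s\n", i, argv[i]);

	argv_free(argv);	/* frees the backing copy and the array */
	return 0;
}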
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 9681d54b95d1..f8e0e5367398 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
+#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/wait.h>
@@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlocks(int yes)
wake_up_klogd();
}
}
-
-
diff --git a/lib/decompress.c b/lib/decompress.c
index 31a804277282..f8fdedaf7b3d 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -38,7 +38,7 @@ struct compress_format {
decompress_fn decompressor;
};
-static const struct compress_format compressed_formats[] __initdata = {
+static const struct compress_format compressed_formats[] __initconst = {
{ {037, 0213}, "gzip", gunzip },
{ {037, 0236}, "gzip", gunzip },
{ {0x42, 0x5a}, "bzip2", bunzip2 },
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 5e396accd3d0..d87a17a819d0 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug_entry *ref)
entry = bucket_find_exact(bucket, ref);
if (!entry) {
+ /* must drop lock before calling dma_mapping_error */
+ put_hash_bucket(bucket, &flags);
+
if (dma_mapping_error(ref->dev, ref->dev_addr)) {
err_printk(ref->dev, NULL,
- "DMA-API: device driver tries "
- "to free an invalid DMA memory address\n");
- return;
+ "DMA-API: device driver tries to free an "
+ "invalid DMA memory address\n");
+ } else {
+ err_printk(ref->dev, NULL,
+ "DMA-API: device driver tries to free DMA "
+ "memory it has not allocated [device "
+ "address=0x%016llx] [size=%llu bytes]\n",
+ ref->dev_addr, ref->size);
}
- err_printk(ref->dev, NULL, "DMA-API: device driver tries "
- "to free DMA memory it has not allocated "
- "[device address=0x%016llx] [size=%llu bytes]\n",
- ref->dev_addr, ref->size);
- goto out;
+ return;
}
if (ref->size != entry->size) {
@@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug_entry *ref)
hash_bucket_del(entry);
dma_entry_free(entry);
-out:
put_hash_bucket(bucket, &flags);
}
@@ -1082,13 +1085,27 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
ref.dev = dev;
ref.dev_addr = dma_addr;
bucket = get_hash_bucket(&ref, &flags);
- entry = bucket_find_exact(bucket, &ref);
- if (!entry)
- goto out;
+ list_for_each_entry(entry, &bucket->list, list) {
+ if (!exact_match(&ref, entry))
+ continue;
+
+ /*
+ * The same physical address can be mapped multiple
+ * times. Without a hardware IOMMU this results in the
+ * same device addresses being put into the dma-debug
+ * hash multiple times too. This can result in false
+ * positives being reported. Therefore we implement a
+ * best-fit algorithm here which updates the first entry
+ * from the hash which fits the reference value and is
+ * not currently listed as being checked.
+ */
+ if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+ entry->map_err_type = MAP_ERR_CHECKED;
+ break;
+ }
+ }
- entry->map_err_type = MAP_ERR_CHECKED;
-out:
put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 42f4f55c9458..53bad099ebd6 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -5,11 +5,16 @@
#include <linux/kernel.h>
#include <linux/export.h>
+#include <linux/sched.h>
+/**
+ * dump_stack - dump the current task information and its stack trace
+ *
+ * Architectures can override this implementation by implementing its own.
+ */
void dump_stack(void)
{
- printk(KERN_NOTICE
- "This architecture does not implement dump_stack()\n");
+ dump_stack_print_info(KERN_DEFAULT);
+ show_stack(NULL, NULL);
}
-
EXPORT_SYMBOL(dump_stack);
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 5276b99ca650..99fec3ae405a 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -24,6 +24,7 @@
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/string.h>
+#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/dynamic_debug.h>
#include <linux/debugfs.h>
@@ -276,48 +277,6 @@ static inline int parse_lineno(const char *str, unsigned int *val)
return 0;
}
-/*
- * Undo octal escaping in a string, inplace. This is useful to
- * allow the user to express a query which matches a format
- * containing embedded spaces.
- */
-#define isodigit(c) ((c) >= '0' && (c) <= '7')
-static char *unescape(char *str)
-{
- char *in = str;
- char *out = str;
-
- while (*in) {
- if (*in == '\\') {
- if (in[1] == '\\') {
- *out++ = '\\';
- in += 2;
- continue;
- } else if (in[1] == 't') {
- *out++ = '\t';
- in += 2;
- continue;
- } else if (in[1] == 'n') {
- *out++ = '\n';
- in += 2;
- continue;
- } else if (isodigit(in[1]) &&
- isodigit(in[2]) &&
- isodigit(in[3])) {
- *out++ = (((in[1] - '0') << 6) |
- ((in[2] - '0') << 3) |
- (in[3] - '0'));
- in += 4;
- continue;
- }
- }
- *out++ = *in++;
- }
- *out = '\0';
-
- return str;
-}
-
static int check_set(const char **dest, char *src, char *name)
{
int rc = 0;
@@ -371,8 +330,10 @@ static int ddebug_parse_query(char *words[], int nwords,
} else if (!strcmp(words[i], "module")) {
rc = check_set(&query->module, words[i+1], "module");
} else if (!strcmp(words[i], "format")) {
- rc = check_set(&query->format, unescape(words[i+1]),
- "format");
+ string_unescape_inplace(words[i+1], UNESCAPE_SPACE |
+ UNESCAPE_OCTAL |
+ UNESCAPE_SPECIAL);
+ rc = check_set(&query->format, words[i+1], "format");
} else if (!strcmp(words[i], "line")) {
char *first = words[i+1];
char *last = strchr(first, '-');
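For control-file users nothing changes here: a query can still match a format containing whitespace via quoting or octal escapes, which string_unescape_inplace() now expands instead of the removed local unescape(). Illustrative queries (the format strings are made up; paths follow Documentation/dynamic-debug-howto.txt conventions):

/*
 * echo 'format "foo: read" +p'  > <debugfs>/dynamic_debug/control
 * echo 'format foo:\040read +p' > <debugfs>/dynamic_debug/control
 */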
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index f7210ad6cffd..c5c7a762b850 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -122,7 +122,7 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
return false;
}
- if (attr->probability <= random32() % 100)
+ if (attr->probability <= prandom_u32() % 100)
return false;
if (!fail_stacktrace(attr))
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 54920433705a..b35cfa9bc3d4 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -34,6 +34,8 @@
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
@@ -480,3 +482,82 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
+
+static void devm_gen_pool_release(struct device *dev, void *res)
+{
+ gen_pool_destroy(*(struct gen_pool **)res);
+}
+
+/**
+ * devm_gen_pool_create - managed gen_pool_create
+ * @dev: device that provides the gen_pool
+ * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
+ * @nid: node id of the node the pool structure should be allocated on, or -1
+ *
+ * Create a new special memory pool that can be used to manage special purpose
+ * memory not managed by the regular kmalloc/kfree interface. The pool will be
+ * automatically destroyed by the device management code.
+ */
+struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
+ int nid)
+{
+ struct gen_pool **ptr, *pool;
+
+ ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
+
+ pool = gen_pool_create(min_alloc_order, nid);
+ if (pool) {
+ *ptr = pool;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return pool;
+}
+
+/**
+ * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
+ * @dev: device to retrieve the gen_pool from
+ * @name: Optional name for the gen_pool, usually NULL
+ *
+ * Returns the gen_pool for the device if one is present, or NULL.
+ */
+struct gen_pool *dev_get_gen_pool(struct device *dev)
+{
+ struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
+ NULL);
+
+ if (!p)
+ return NULL;
+ return *p;
+}
+EXPORT_SYMBOL_GPL(dev_get_gen_pool);
+
+#ifdef CONFIG_OF
+/**
+ * of_get_named_gen_pool - find a pool by phandle property
+ * @np: device node
+ * @propname: property name containing phandle(s)
+ * @index: index into the phandle array
+ *
+ * Returns the pool that contains the chunk starting at the physical
+ * address of the device tree node pointed at by the phandle property,
+ * or NULL if not found.
+ */
+struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+ const char *propname, int index)
+{
+ struct platform_device *pdev;
+ struct device_node *np_pool;
+
+ np_pool = of_parse_phandle(np, propname, index);
+ if (!np_pool)
+ return NULL;
+ pdev = of_find_device_by_node(np_pool);
+ if (!pdev)
+ return NULL;
+ return dev_get_gen_pool(&pdev->dev);
+}
+EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
+#endif /* CONFIG_OF */
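A hedged sketch of how a driver might use the new managed constructor; the minimum-order, base address and size below are illustrative assumptions, not values from this patch:

#include <linux/genalloc.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct gen_pool *pool;

	/* order 3: the pool hands out memory in 8-byte granules */
	pool = devm_gen_pool_create(&pdev->dev, 3, -1);
	if (!pool)
		return -ENOMEM;

	/* base/size of the managed region are made-up values here */
	if (gen_pool_add(pool, 0xd0000000, 4096, -1))
		return -ENOMEM;

	/* no gen_pool_destroy() in error/remove paths: devres tears
	 * the pool down when the device goes away */
	return 0;
}

A device-tree consumer would then locate such a pool through of_get_named_gen_pool(np, "sram", 0), where "sram" stands for whatever phandle property the binding defines.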
diff --git a/lib/idr.c b/lib/idr.c
index 00739aaf95a2..cca4b9302a71 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -106,8 +106,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
if (layer_idr)
return get_from_free_list(layer_idr);
- /* try to allocate directly from kmem_cache */
- new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+ /*
+ * Try to allocate directly from kmem_cache. We want to try this
+ * before preload buffer; otherwise, non-preloading idr_alloc()
+ * users will end up taking advantage of preloading ones. As the
+ * following is allowed to fail for preloaded cases, suppress
+ * warning this time.
+ */
+ new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
if (new)
return new;
@@ -115,18 +121,24 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
* Try to fetch one from the per-cpu preload buffer if in process
* context. See idr_preload() for details.
*/
- if (in_interrupt())
- return NULL;
-
- preempt_disable();
- new = __this_cpu_read(idr_preload_head);
- if (new) {
- __this_cpu_write(idr_preload_head, new->ary[0]);
- __this_cpu_dec(idr_preload_cnt);
- new->ary[0] = NULL;
+ if (!in_interrupt()) {
+ preempt_disable();
+ new = __this_cpu_read(idr_preload_head);
+ if (new) {
+ __this_cpu_write(idr_preload_head, new->ary[0]);
+ __this_cpu_dec(idr_preload_cnt);
+ new->ary[0] = NULL;
+ }
+ preempt_enable();
+ if (new)
+ return new;
}
- preempt_enable();
- return new;
+
+ /*
+ * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so
+ * that memory allocation failure warning is printed as intended.
+ */
+ return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
}
static void idr_layer_rcu_free(struct rcu_head *head)
@@ -184,20 +196,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
}
}
-/**
- * idr_pre_get - reserve resources for idr allocation
- * @idp: idr handle
- * @gfp_mask: memory allocation flags
- *
- * This function should be called prior to calling the idr_get_new* functions.
- * It preallocates enough memory to satisfy the worst possible allocation. The
- * caller should pass in GFP_KERNEL if possible. This of course requires that
- * no spinning locks be held.
- *
- * If the system is REALLY out of memory this function returns %0,
- * otherwise %1.
- */
-int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
while (idp->id_free_cnt < MAX_IDR_FREE) {
struct idr_layer *new;
@@ -208,13 +207,12 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
}
return 1;
}
-EXPORT_SYMBOL(idr_pre_get);
+EXPORT_SYMBOL(__idr_pre_get);
/**
* sub_alloc - try to allocate an id without growing the tree depth
* @idp: idr handle
* @starting_id: id to start search at
- * @id: pointer to the allocated handle
* @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
* @gfp_mask: allocation mask for idr_layer_alloc()
* @layer_idr: optional idr passed to idr_layer_alloc()
@@ -376,25 +374,7 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
idr_mark_full(pa, id);
}
-/**
- * idr_get_new_above - allocate new idr entry above or equal to a start id
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @starting_id: id to start search at
- * @id: pointer to the allocated handle
- *
- * This is the allocate id function. It should be called with any
- * required locks.
- *
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
- *
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
- *
- * @id returns a value in the range @starting_id ... %0x7fffffff
- */
-int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
+int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
struct idr_layer *pa[MAX_IDR_LEVEL + 1];
int rv;
@@ -407,7 +387,7 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
*id = rv;
return 0;
}
-EXPORT_SYMBOL(idr_get_new_above);
+EXPORT_SYMBOL(__idr_get_new_above);
/**
* idr_preload - preload for idr_alloc()
@@ -515,6 +495,33 @@ int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
}
EXPORT_SYMBOL_GPL(idr_alloc);
+/**
+ * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
+ * @idr: the (initialized) idr
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive, <= 0 for max)
+ * @gfp_mask: memory allocation flags
+ *
+ * Essentially the same as idr_alloc, but prefers to allocate progressively
+ * higher ids if it can. If the "cur" counter wraps, then it will start again
+ * at the "start" end of the range and allocate one that has already been used.
+ */
+int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
+ gfp_t gfp_mask)
+{
+ int id;
+
+ id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
+ if (id == -ENOSPC)
+ id = idr_alloc(idr, ptr, start, end, gfp_mask);
+
+ if (likely(id >= 0))
+ idr->cur = id + 1;
+ return id;
+}
+EXPORT_SYMBOL(idr_alloc_cyclic);
+
static void idr_remove_warning(int id)
{
printk(KERN_WARNING
@@ -908,7 +915,7 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
/* allocate idr_layers */
- if (!idr_pre_get(&ida->idr, gfp_mask))
+ if (!__idr_pre_get(&ida->idr, gfp_mask))
return 0;
/* allocate free_bitmap */
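The new idr_alloc_cyclic() slots into the same preload convention documented for idr_alloc(); a typical caller looks like this (lock, range and names are assumptions for illustration):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(foo_idr);
static DEFINE_SPINLOCK(foo_lock);

static int foo_new_id(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; fills per-cpu buffer */
	spin_lock(&foo_lock);
	/* ids grow monotonically from 1 until they wrap back to 1 */
	id = idr_alloc_cyclic(&foo_idr, obj, 1, 0, GFP_NOWAIT);
	spin_unlock(&foo_lock);
	idr_preload_end();

	return id;	/* new id, or -ENOMEM / -ENOSPC */
}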
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index fc2eeb7cb2ea..1ef4cc344977 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -1,3 +1,9 @@
+/*
+ * Copyright (C) 2013 Davidlohr Bueso <davidlohr.bueso@hp.com>
+ *
+ * Based on the shift-and-subtract algorithm for computing integer
+ * square root from Guy L. Steele.
+ */
#include <linux/kernel.h>
#include <linux/export.h>
@@ -10,23 +16,23 @@
*/
unsigned long int_sqrt(unsigned long x)
{
- unsigned long op, res, one;
+ unsigned long b, m, y = 0;
- op = x;
- res = 0;
+ if (x <= 1)
+ return x;
- one = 1UL << (BITS_PER_LONG - 2);
- while (one > op)
- one >>= 2;
+ m = 1UL << (BITS_PER_LONG - 2);
+ while (m != 0) {
+ b = y + m;
+ y >>= 1;
- while (one != 0) {
- if (op >= res + one) {
- op = op - (res + one);
- res = res + 2 * one;
+ if (x >= b) {
+ x -= b;
+ y += m;
}
- res /= 2;
- one /= 4;
+ m >>= 2;
}
- return res;
+
+ return y;
}
EXPORT_SYMBOL(int_sqrt);
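The loop keeps y as the partial root and m as the current bit pair being tried. A stand-alone user-space transcription for spot-checking the algorithm (assumes a hosted C environment and BITS_PER_LONG derived from CHAR_BIT):

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static unsigned long int_sqrt(unsigned long x)
{
	unsigned long b, m, y = 0;

	if (x <= 1)
		return x;

	m = 1UL << (BITS_PER_LONG - 2);
	while (m != 0) {
		b = y + m;
		y >>= 1;

		if (x >= b) {
			x -= b;
			y += m;
		}
		m >>= 2;
	}
	return y;
}

int main(void)
{
	unsigned long x;

	for (x = 0; x < 1UL << 20; x++) {
		unsigned long r = int_sqrt(x);
		assert(r * r <= x && (r + 1) * (r + 1) > x);
	}
	printf("ok\n");
	return 0;
}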
diff --git a/lib/kobject.c b/lib/kobject.c
index e07ee1fcd6f1..a65486613d79 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -529,6 +529,13 @@ struct kobject *kobject_get(struct kobject *kobj)
return kobj;
}
+static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
+{
+ if (!kref_get_unless_zero(&kobj->kref))
+ kobj = NULL;
+ return kobj;
+}
+
/*
* kobject_cleanup - free kobject resources.
* @kobj: object to cleanup
@@ -751,7 +758,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
list_for_each_entry(k, &kset->list, entry) {
if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
- ret = kobject_get(k);
+ ret = kobject_get_unless_zero(k);
break;
}
}
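For context, the helper this builds on (quoted from include/linux/kref.h of the same era) refuses to take a reference once the count has already dropped to zero, which is what closes the kset_find_obj() race against a concurrent kobject_put()/release:

/* from include/linux/kref.h */
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
	/* atomic_add_unless() adds 1 only while refcount != 0 and
	 * returns non-zero iff it did, so a dying kobject is never
	 * resurrected by a lookup */
	return atomic_add_unless(&kref->refcount, 1, 0);
}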
diff --git a/lib/list_sort.c b/lib/list_sort.c
index d7325c6b103f..1183fa70a44d 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -229,7 +229,7 @@ static int __init list_sort_test(void)
goto exit;
}
/* force some equivalencies */
- el->value = random32() % (TEST_LIST_LEN/3);
+ el->value = prandom_u32() % (TEST_LIST_LEN / 3);
el->serial = i;
el->poison1 = TEST_POISON1;
el->poison2 = TEST_POISON2;
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
index 44b92cb6224f..eb4a04afea80 100644
--- a/lib/notifier-error-inject.c
+++ b/lib/notifier-error-inject.c
@@ -17,7 +17,7 @@ static int debugfs_errno_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set,
"%lld\n");
-static struct dentry *debugfs_create_errno(const char *name, mode_t mode,
+static struct dentry *debugfs_create_errno(const char *name, umode_t mode,
struct dentry *parent, int *value)
{
return debugfs_create_file(name, mode, parent, value, &fops_errno);
@@ -50,7 +50,7 @@ struct dentry *notifier_err_inject_init(const char *name, struct dentry *parent,
struct notifier_err_inject *err_inject, int priority)
{
struct notifier_err_inject_action *action;
- mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+ umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
struct dentry *dir;
struct dentry *actions_dir;
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index d8de11f45908..318f382a010d 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -9,6 +9,7 @@
* 2 of the Licence, or (at your option) any later version.
*/
+#include <linux/module.h>
#include <linux/export.h>
#include <linux/oid_registry.h>
#include <linux/kernel.h>
@@ -16,6 +17,10 @@
#include <linux/bug.h>
#include "oid_registry_data.c"
+MODULE_DESCRIPTION("OID Registry");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
/**
* look_up_OID - Find an OID registration for the specified data
* @data: Binary representation of the OID
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index af38aedbd874..122f02f9941b 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -117,8 +117,7 @@ static int black_path_count(struct rb_node *rb)
static void check(int nr_nodes)
{
struct rb_node *rb;
- int count = 0;
- int blacks = 0;
+ int count = 0, blacks = 0;
u32 prev_key = 0;
for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
@@ -134,7 +133,9 @@ static void check(int nr_nodes)
prev_key = node->key;
count++;
}
+
WARN_ON_ONCE(count != nr_nodes);
+ WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1);
}
static void check_augmented(int nr_nodes)
@@ -148,7 +149,7 @@ static void check_augmented(int nr_nodes)
}
}
-static int rbtree_test_init(void)
+static int __init rbtree_test_init(void)
{
int i, j;
cycles_t time1, time2, time;
@@ -221,7 +222,7 @@ static int rbtree_test_init(void)
return -EAGAIN; /* Fail will directly unload the module */
}
-static void rbtree_test_exit(void)
+static void __exit rbtree_test_exit(void)
{
printk(KERN_ALERT "test exit\n");
}
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 7542afbb22b3..9be8a9144978 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -9,12 +9,15 @@
#include <linux/sched.h>
#include <linux/export.h>
+enum rwsem_waiter_type {
+ RWSEM_WAITING_FOR_WRITE,
+ RWSEM_WAITING_FOR_READ
+};
+
struct rwsem_waiter {
struct list_head list;
struct task_struct *task;
- unsigned int flags;
-#define RWSEM_WAITING_FOR_READ 0x00000001
-#define RWSEM_WAITING_FOR_WRITE 0x00000002
+ enum rwsem_waiter_type type;
};
int rwsem_is_locked(struct rw_semaphore *sem)
@@ -67,26 +70,17 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
- if (!wakewrite) {
- if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
- goto out;
- goto dont_wake_writers;
- }
-
- /*
- * as we support write lock stealing, we can't set sem->activity
- * to -1 here to indicate we get the lock. Instead, we wake it up
- * to let it go get it again.
- */
- if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
- wake_up_process(waiter->task);
+ if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
+ if (wakewrite)
+ /* Wake up a writer. Note that we do not grant it the
+ * lock - it will have to acquire it when it runs. */
+ wake_up_process(waiter->task);
goto out;
}
/* grant an infinite number of read locks to the front of the queue */
- dont_wake_writers:
woken = 0;
- while (waiter->flags & RWSEM_WAITING_FOR_READ) {
+ do {
struct list_head *next = waiter->list.next;
list_del(&waiter->list);
@@ -96,10 +90,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
wake_up_process(tsk);
put_task_struct(tsk);
woken++;
- if (list_empty(&sem->wait_list))
+ if (next == &sem->wait_list)
break;
waiter = list_entry(next, struct rwsem_waiter, list);
- }
+ } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
sem->activity += woken;
@@ -144,7 +138,7 @@ void __sched __down_read(struct rw_semaphore *sem)
/* set up my own style of waitqueue */
waiter.task = tsk;
- waiter.flags = RWSEM_WAITING_FOR_READ;
+ waiter.type = RWSEM_WAITING_FOR_READ;
get_task_struct(tsk);
list_add_tail(&waiter.list, &sem->wait_list);
@@ -201,7 +195,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
/* set up my own style of waitqueue */
tsk = current;
waiter.task = tsk;
- waiter.flags = RWSEM_WAITING_FOR_WRITE;
+ waiter.type = RWSEM_WAITING_FOR_WRITE;
list_add_tail(&waiter.list, &sem->wait_list);
/* wait for someone to release the lock */
diff --git a/lib/rwsem.c b/lib/rwsem.c
index ad5e0df16ab4..cf0ad2ad19f5 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -4,6 +4,7 @@
* Derived from arch/i386/kernel/semaphore.c
*
* Writer lock-stealing by Alex Shi <alex.shi@intel.com>
+ * and Michel Lespinasse <walken@google.com>
*/
#include <linux/rwsem.h>
#include <linux/sched.h>
@@ -30,21 +31,22 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
EXPORT_SYMBOL(__init_rwsem);
+enum rwsem_waiter_type {
+ RWSEM_WAITING_FOR_WRITE,
+ RWSEM_WAITING_FOR_READ
+};
+
struct rwsem_waiter {
struct list_head list;
struct task_struct *task;
- unsigned int flags;
-#define RWSEM_WAITING_FOR_READ 0x00000001
-#define RWSEM_WAITING_FOR_WRITE 0x00000002
+ enum rwsem_waiter_type type;
};
-/* Wake types for __rwsem_do_wake(). Note that RWSEM_WAKE_NO_ACTIVE and
- * RWSEM_WAKE_READ_OWNED imply that the spinlock must have been kept held
- * since the rwsem value was observed.
- */
-#define RWSEM_WAKE_ANY 0 /* Wake whatever's at head of wait list */
-#define RWSEM_WAKE_NO_ACTIVE 1 /* rwsem was observed with no active thread */
-#define RWSEM_WAKE_READ_OWNED 2 /* rwsem was observed to be read owned */
+enum rwsem_wake_type {
+ RWSEM_WAKE_ANY, /* Wake whatever's at head of wait list */
+ RWSEM_WAKE_READERS, /* Wake readers only */
+ RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */
+};
/*
* handle the lock release when processes blocked on it that can now run
@@ -57,46 +59,43 @@ struct rwsem_waiter {
* - writers are only woken if downgrading is false
*/
static struct rw_semaphore *
-__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
+__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
{
struct rwsem_waiter *waiter;
struct task_struct *tsk;
struct list_head *next;
- signed long woken, loop, adjustment;
+ long oldcount, woken, loop, adjustment;
waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
- if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
- goto readers_only;
-
- if (wake_type == RWSEM_WAKE_READ_OWNED)
- /* Another active reader was observed, so wakeup is not
- * likely to succeed. Save the atomic op.
- */
+ if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
+ if (wake_type == RWSEM_WAKE_ANY)
+ /* Wake writer at the front of the queue, but do not
+ * grant it the lock yet as we want other writers
+ * to be able to steal it. Readers, on the other hand,
+ * will block as they will notice the queued writer.
+ */
+ wake_up_process(waiter->task);
goto out;
+ }
- /* Wake up the writing waiter and let the task grab the sem: */
- wake_up_process(waiter->task);
- goto out;
-
- readers_only:
- /* If we come here from up_xxxx(), another thread might have reached
- * rwsem_down_failed_common() before we acquired the spinlock and
- * woken up a waiter, making it now active. We prefer to check for
- * this first in order to not spend too much time with the spinlock
- * held if we're not going to be able to wake up readers in the end.
- *
- * Note that we do not need to update the rwsem count: any writer
- * trying to acquire rwsem will run rwsem_down_write_failed() due
- * to the waiting threads and block trying to acquire the spinlock.
- *
- * We use a dummy atomic update in order to acquire the cache line
- * exclusively since we expect to succeed and run the final rwsem
- * count adjustment pretty soon.
+ /* Writers might steal the lock before we grant it to the next reader.
+ * We prefer to do the first reader grant before counting readers
+ * so we can bail out early if a writer stole the lock.
*/
- if (wake_type == RWSEM_WAKE_ANY &&
- rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
- /* Someone grabbed the sem for write already */
- goto out;
+ adjustment = 0;
+ if (wake_type != RWSEM_WAKE_READ_OWNED) {
+ adjustment = RWSEM_ACTIVE_READ_BIAS;
+ try_reader_grant:
+ oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
+ if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
+ /* A writer stole the lock. Undo our reader grant. */
+ if (rwsem_atomic_update(-adjustment, sem) &
+ RWSEM_ACTIVE_MASK)
+ goto out;
+ /* Last active locker left. Retry waking readers. */
+ goto try_reader_grant;
+ }
+ }
/* Grant an infinite number of read locks to the readers at the front
* of the queue. Note we increment the 'active part' of the count by
@@ -112,17 +111,19 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
waiter = list_entry(waiter->list.next,
struct rwsem_waiter, list);
- } while (waiter->flags & RWSEM_WAITING_FOR_READ);
+ } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
- adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
- if (waiter->flags & RWSEM_WAITING_FOR_READ)
+ adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
+ if (waiter->type != RWSEM_WAITING_FOR_WRITE)
/* hit end of list above */
adjustment -= RWSEM_WAITING_BIAS;
- rwsem_atomic_add(adjustment, sem);
+ if (adjustment)
+ rwsem_atomic_add(adjustment, sem);
next = sem->wait_list.next;
- for (loop = woken; loop > 0; loop--) {
+ loop = woken;
+ do {
waiter = list_entry(next, struct rwsem_waiter, list);
next = waiter->list.next;
tsk = waiter->task;
@@ -130,7 +131,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
- }
+ } while (--loop);
sem->wait_list.next = next;
next->prev = &sem->wait_list;
@@ -139,60 +140,21 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
return sem;
}
-/* Try to get write sem, caller holds sem->wait_lock: */
-static int try_get_writer_sem(struct rw_semaphore *sem,
- struct rwsem_waiter *waiter)
-{
- struct rwsem_waiter *fwaiter;
- long oldcount, adjustment;
-
- /* only steal when first waiter is writing */
- fwaiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
- if (!(fwaiter->flags & RWSEM_WAITING_FOR_WRITE))
- return 0;
-
- adjustment = RWSEM_ACTIVE_WRITE_BIAS;
- /* Only one waiter in the queue: */
- if (fwaiter == waiter && waiter->list.next == &sem->wait_list)
- adjustment -= RWSEM_WAITING_BIAS;
-
-try_again_write:
- oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
- if (!(oldcount & RWSEM_ACTIVE_MASK)) {
- /* No active lock: */
- struct task_struct *tsk = waiter->task;
-
- list_del(&waiter->list);
- smp_mb();
- put_task_struct(tsk);
- tsk->state = TASK_RUNNING;
- return 1;
- }
- /* some one grabbed the sem already */
- if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
- return 0;
- goto try_again_write;
-}
-
/*
- * wait for a lock to be granted
+ * wait for the read lock to be granted
*/
-static struct rw_semaphore __sched *
-rwsem_down_failed_common(struct rw_semaphore *sem,
- unsigned int flags, signed long adjustment)
+struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
+ long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
struct rwsem_waiter waiter;
struct task_struct *tsk = current;
- signed long count;
-
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
/* set up my own style of waitqueue */
- raw_spin_lock_irq(&sem->wait_lock);
waiter.task = tsk;
- waiter.flags = flags;
+ waiter.type = RWSEM_WAITING_FOR_READ;
get_task_struct(tsk);
+ raw_spin_lock_irq(&sem->wait_lock);
if (list_empty(&sem->wait_list))
adjustment += RWSEM_WAITING_BIAS;
list_add_tail(&waiter.list, &sem->wait_list);
@@ -200,35 +162,24 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
/* we're now waiting on the lock, but no longer actively locking */
count = rwsem_atomic_update(adjustment, sem);
- /* If there are no active locks, wake the front queued process(es) up.
+ /* If there are no active locks, wake the front queued process(es).
*
- * Alternatively, if we're called from a failed down_write(), there
- * were already threads queued before us and there are no active
- * writers, the lock must be read owned; so we try to wake any read
- * locks that were queued ahead of us. */
- if (count == RWSEM_WAITING_BIAS)
- sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
- else if (count > RWSEM_WAITING_BIAS &&
- adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
- sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
+ * If there are no writers and we are first in the queue,
+ * wake our own waiter to join the existing active readers !
+ */
+ if (count == RWSEM_WAITING_BIAS ||
+ (count > RWSEM_WAITING_BIAS &&
+ adjustment != -RWSEM_ACTIVE_READ_BIAS))
+ sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
raw_spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */
- for (;;) {
+ while (true) {
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
if (!waiter.task)
break;
-
- raw_spin_lock_irq(&sem->wait_lock);
- /* Try to get the writer sem, may steal from the head writer: */
- if (flags == RWSEM_WAITING_FOR_WRITE)
- if (try_get_writer_sem(sem, &waiter)) {
- raw_spin_unlock_irq(&sem->wait_lock);
- return sem;
- }
- raw_spin_unlock_irq(&sem->wait_lock);
schedule();
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
tsk->state = TASK_RUNNING;
@@ -237,21 +188,62 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
}
/*
- * wait for the read lock to be granted
- */
-struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
-{
- return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
- -RWSEM_ACTIVE_READ_BIAS);
-}
-
-/*
- * wait for the write lock to be granted
+ * wait until we successfully acquire the write lock
*/
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
- return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
- -RWSEM_ACTIVE_WRITE_BIAS);
+ long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
+ struct rwsem_waiter waiter;
+ struct task_struct *tsk = current;
+
+ /* set up my own style of waitqueue */
+ waiter.task = tsk;
+ waiter.type = RWSEM_WAITING_FOR_WRITE;
+
+ raw_spin_lock_irq(&sem->wait_lock);
+ if (list_empty(&sem->wait_list))
+ adjustment += RWSEM_WAITING_BIAS;
+ list_add_tail(&waiter.list, &sem->wait_list);
+
+ /* we're now waiting on the lock, but no longer actively locking */
+ count = rwsem_atomic_update(adjustment, sem);
+
+ /* If there were already threads queued before us and there are no
+ * active writers, the lock must be read owned; so we try to wake
+ * any read locks that were queued ahead of us. */
+ if (count > RWSEM_WAITING_BIAS &&
+ adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
+ sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);
+
+ /* wait until we successfully acquire the lock */
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ while (true) {
+ if (!(count & RWSEM_ACTIVE_MASK)) {
+ /* Try acquiring the write lock. */
+ count = RWSEM_ACTIVE_WRITE_BIAS;
+ if (!list_is_singular(&sem->wait_list))
+ count += RWSEM_WAITING_BIAS;
+ if (cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
+ RWSEM_WAITING_BIAS)
+ break;
+ }
+
+ raw_spin_unlock_irq(&sem->wait_lock);
+
+ /* Block until there are no active lockers. */
+ do {
+ schedule();
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ } while ((count = sem->count) & RWSEM_ACTIVE_MASK);
+
+ raw_spin_lock_irq(&sem->wait_lock);
+ }
+
+ list_del(&waiter.list);
+ raw_spin_unlock_irq(&sem->wait_lock);
+ tsk->state = TASK_RUNNING;
+
+ return sem;
}
/*
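A reference note on the count arithmetic used by the reader-grant path above (constants as defined in the 64-bit asm/rwsem.h variants of this era; 32-bit uses the same scheme with a 16-bit active part):

/*
 * #define RWSEM_ACTIVE_BIAS        0x00000001L
 * #define RWSEM_ACTIVE_MASK        0xffffffffL
 * #define RWSEM_WAITING_BIAS       (-RWSEM_ACTIVE_MASK-1)
 * #define RWSEM_ACTIVE_READ_BIAS   RWSEM_ACTIVE_BIAS
 * #define RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *
 * After the speculative RWSEM_ACTIVE_READ_BIAS grant in
 * __rwsem_do_wake(), "oldcount < RWSEM_WAITING_BIAS" can only hold if
 * a writer's ACTIVE_WRITE_BIAS is also in the count, i.e. the lock
 * was stolen; the grant is then undone, and retried once the active
 * part drains to zero.
 */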
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b83c144d731f..a1cf8cae60e7 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -401,7 +401,6 @@ void __sg_page_iter_start(struct sg_page_iter *piter,
piter->__pg_advance = 0;
piter->__nents = nents;
- piter->page = NULL;
piter->sg = sglist;
piter->sg_pgoffset = pgoffset;
}
@@ -426,7 +425,6 @@ bool __sg_page_iter_next(struct sg_page_iter *piter)
if (!--piter->__nents || !piter->sg)
return false;
}
- piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset);
return true;
}
@@ -496,7 +494,7 @@ bool sg_miter_next(struct sg_mapping_iter *miter)
miter->__remaining = min_t(unsigned long, miter->__remaining,
PAGE_SIZE - miter->__offset);
}
- miter->page = miter->piter.page;
+ miter->page = sg_page_iter_page(&miter->piter);
miter->consumed = miter->length = miter->__remaining;
if (miter->__flags & SG_MITER_ATOMIC)
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 4407f8c9b1f7..b7c72311ad0c 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -18,6 +18,9 @@ void show_mem(unsigned int filter)
printk("Mem-Info:\n");
show_free_areas(filter);
+ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
+ return;
+
for_each_online_pgdat(pgdat) {
unsigned long i, flags;
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 1cffc223bff5..ed5c1454dd62 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -2,10 +2,12 @@
* Helpers for formatting and printing strings
*
* Copyright 31 August 2008 James Bottomley
+ * Copyright (C) 2013, Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/export.h>
+#include <linux/ctype.h>
#include <linux/string_helpers.h>
/**
@@ -66,3 +68,134 @@ int string_get_size(u64 size, const enum string_size_units units,
return 0;
}
EXPORT_SYMBOL(string_get_size);
+
+static bool unescape_space(char **src, char **dst)
+{
+ char *p = *dst, *q = *src;
+
+ switch (*q) {
+ case 'n':
+ *p = '\n';
+ break;
+ case 'r':
+ *p = '\r';
+ break;
+ case 't':
+ *p = '\t';
+ break;
+ case 'v':
+ *p = '\v';
+ break;
+ case 'f':
+ *p = '\f';
+ break;
+ default:
+ return false;
+ }
+ *dst += 1;
+ *src += 1;
+ return true;
+}
+
+static bool unescape_octal(char **src, char **dst)
+{
+ char *p = *dst, *q = *src;
+ u8 num;
+
+ if (isodigit(*q) == 0)
+ return false;
+
+ num = (*q++) & 7;
+ while (num < 32 && isodigit(*q) && (q - *src < 3)) {
+ num <<= 3;
+ num += (*q++) & 7;
+ }
+ *p = num;
+ *dst += 1;
+ *src = q;
+ return true;
+}
+
+static bool unescape_hex(char **src, char **dst)
+{
+ char *p = *dst, *q = *src;
+ int digit;
+ u8 num;
+
+ if (*q++ != 'x')
+ return false;
+
+ num = digit = hex_to_bin(*q++);
+ if (digit < 0)
+ return false;
+
+ digit = hex_to_bin(*q);
+ if (digit >= 0) {
+ q++;
+ num = (num << 4) | digit;
+ }
+ *p = num;
+ *dst += 1;
+ *src = q;
+ return true;
+}
+
+static bool unescape_special(char **src, char **dst)
+{
+ char *p = *dst, *q = *src;
+
+ switch (*q) {
+ case '\"':
+ *p = '\"';
+ break;
+ case '\\':
+ *p = '\\';
+ break;
+ case 'a':
+ *p = '\a';
+ break;
+ case 'e':
+ *p = '\e';
+ break;
+ default:
+ return false;
+ }
+ *dst += 1;
+ *src += 1;
+ return true;
+}
+
+int string_unescape(char *src, char *dst, size_t size, unsigned int flags)
+{
+ char *out = dst;
+
+ while (*src && --size) {
+ if (src[0] == '\\' && src[1] != '\0' && size > 1) {
+ src++;
+ size--;
+
+ if (flags & UNESCAPE_SPACE &&
+ unescape_space(&src, &out))
+ continue;
+
+ if (flags & UNESCAPE_OCTAL &&
+ unescape_octal(&src, &out))
+ continue;
+
+ if (flags & UNESCAPE_HEX &&
+ unescape_hex(&src, &out))
+ continue;
+
+ if (flags & UNESCAPE_SPECIAL &&
+ unescape_special(&src, &out))
+ continue;
+
+ *out++ = '\\';
+ }
+ *out++ = *src++;
+ }
+ *out = '\0';
+
+ return out - dst;
+}
+EXPORT_SYMBOL(string_unescape);
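A minimal caller of the new API, using the in-place wrapper declared next to string_unescape() in <linux/string_helpers.h> (the buffer contents are an arbitrary example):

#include <linux/kernel.h>
#include <linux/string_helpers.h>

static void demo(void)
{
	char buf[] = "a\\tb\\x21\\n";	/* literal backslash escapes */
	int len;

	len = string_unescape_inplace(buf, UNESCAPE_SPACE | UNESCAPE_HEX);
	/* buf is now "a\tb!\n"; len == 5, the length sans trailing NUL */
	pr_info("%d: %s", len, buf);
}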
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bfe02b8fc55b..d23762e6652c 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -105,9 +105,9 @@ setup_io_tlb_npages(char *str)
if (!strcmp(str, "force"))
swiotlb_force = 1;
- return 1;
+ return 0;
}
-__setup("swiotlb=", setup_io_tlb_npages);
+early_param("swiotlb", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
unsigned long swiotlb_nr_tbl(void)
@@ -115,6 +115,18 @@ unsigned long swiotlb_nr_tbl(void)
return io_tlb_nslabs;
}
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
+
+/* default to 64MB */
+#define IO_TLB_DEFAULT_SIZE (64UL<<20)
+unsigned long swiotlb_size_or_default(void)
+{
+ unsigned long size;
+
+ size = io_tlb_nslabs << IO_TLB_SHIFT;
+
+ return size ? size : (IO_TLB_DEFAULT_SIZE);
+}
+
/* Note that this doesn't work with highmem page */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
volatile void *address)
@@ -188,8 +200,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
void __init
swiotlb_init(int verbose)
{
- /* default to 64MB */
- size_t default_size = 64UL<<20;
+ size_t default_size = IO_TLB_DEFAULT_SIZE;
unsigned char *vstart;
unsigned long bytes;
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
new file mode 100644
index 000000000000..6ac48de04c0e
--- /dev/null
+++ b/lib/test-string_helpers.c
@@ -0,0 +1,103 @@
+/*
+ * Test cases for lib/string_helpers.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+
+struct test_string {
+ const char *in;
+ const char *out;
+ unsigned int flags;
+};
+
+static const struct test_string strings[] __initconst = {
+ {
+ .in = "\\f\\ \\n\\r\\t\\v",
+ .out = "\f\\ \n\r\t\v",
+ .flags = UNESCAPE_SPACE,
+ },
+ {
+ .in = "\\40\\1\\387\\0064\\05\\040\\8a\\110\\777",
+ .out = " \001\00387\0064\005 \\8aH?7",
+ .flags = UNESCAPE_OCTAL,
+ },
+ {
+ .in = "\\xv\\xa\\x2c\\xD\\x6f2",
+ .out = "\\xv\n,\ro2",
+ .flags = UNESCAPE_HEX,
+ },
+ {
+ .in = "\\h\\\\\\\"\\a\\e\\",
+ .out = "\\h\\\"\a\e\\",
+ .flags = UNESCAPE_SPECIAL,
+ },
+};
+
+static void __init test_string_unescape(unsigned int flags, bool inplace)
+{
+ char in[256];
+ char out_test[256];
+ char out_real[256];
+ int i, p = 0, q_test = 0, q_real = sizeof(out_real);
+
+ for (i = 0; i < ARRAY_SIZE(strings); i++) {
+ const char *s = strings[i].in;
+ int len = strlen(strings[i].in);
+
+ /* Copy string to in buffer */
+ memcpy(&in[p], s, len);
+ p += len;
+
+ /* Copy expected result for given flags */
+ if (flags & strings[i].flags) {
+ s = strings[i].out;
+ len = strlen(strings[i].out);
+ }
+ memcpy(&out_test[q_test], s, len);
+ q_test += len;
+ }
+ in[p++] = '\0';
+
+ /* Call string_unescape and compare result */
+ if (inplace) {
+ memcpy(out_real, in, p);
+ if (flags == UNESCAPE_ANY)
+ q_real = string_unescape_any_inplace(out_real);
+ else
+ q_real = string_unescape_inplace(out_real, flags);
+ } else if (flags == UNESCAPE_ANY) {
+ q_real = string_unescape_any(in, out_real, q_real);
+ } else {
+ q_real = string_unescape(in, out_real, q_real, flags);
+ }
+
+ if (q_real != q_test || memcmp(out_test, out_real, q_test)) {
+ pr_warn("Test failed: flags = %u\n", flags);
+ print_hex_dump(KERN_WARNING, "Input: ",
+ DUMP_PREFIX_NONE, 16, 1, in, p - 1, true);
+ print_hex_dump(KERN_WARNING, "Expected: ",
+ DUMP_PREFIX_NONE, 16, 1, out_test, q_test, true);
+ print_hex_dump(KERN_WARNING, "Got: ",
+ DUMP_PREFIX_NONE, 16, 1, out_real, q_real, true);
+ }
+}
+
+static int __init test_string_helpers_init(void)
+{
+ unsigned int i;
+
+ pr_info("Running tests...\n");
+ for (i = 0; i < UNESCAPE_ANY + 1; i++)
+ test_string_unescape(i, false);
+ test_string_unescape(get_random_int() % (UNESCAPE_ANY + 1), true);
+
+ return -EINVAL;
+}
+module_init(test_string_helpers_init);
+MODULE_LICENSE("Dual BSD/GPL");
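(The deliberate -EINVAL return from the init routine makes the module unload again right after its checks run, the same "fail will directly unload the module" trick rbtree_test relies on above; any mismatches land in the kernel log as hex dumps.)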
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
new file mode 100644
index 000000000000..6f500ef2301d
--- /dev/null
+++ b/lib/ucs2_string.c
@@ -0,0 +1,51 @@
+#include <linux/ucs2_string.h>
+#include <linux/module.h>
+
+/* Return the number of unicode characters in data */
+unsigned long
+ucs2_strnlen(const ucs2_char_t *s, size_t maxlength)
+{
+ unsigned long length = 0;
+
+ while (*s++ != 0 && length < maxlength)
+ length++;
+ return length;
+}
+EXPORT_SYMBOL(ucs2_strnlen);
+
+unsigned long
+ucs2_strlen(const ucs2_char_t *s)
+{
+ return ucs2_strnlen(s, ~0UL);
+}
+EXPORT_SYMBOL(ucs2_strlen);
+
+/*
+ * Return the number of bytes is the length of this string
+ * Note: this is NOT the same as the number of unicode characters
+ */
+unsigned long
+ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength)
+{
+ return ucs2_strnlen(data, maxlength/sizeof(ucs2_char_t)) * sizeof(ucs2_char_t);
+}
+EXPORT_SYMBOL(ucs2_strsize);
+
+int
+ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
+{
+ while (1) {
+ if (len == 0)
+ return 0;
+ if (*a < *b)
+ return -1;
+ if (*a > *b)
+ return 1;
+ if (*a == 0) /* implies *b == 0 */
+ return 0;
+ a++;
+ b++;
+ len--;
+ }
+}
+EXPORT_SYMBOL(ucs2_strncmp);
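ucs2_char_t is a 16-bit code unit (typedef u16 in the new <linux/ucs2_string.h>, split out for the EFI variable code), and the helpers mirror their ASCII strn* namesakes. A quick sketch of the length-versus-size distinction:

#include <linux/ucs2_string.h>

static const ucs2_char_t name[] = { 'B', 'o', 'o', 't', 0 };

/*
 * ucs2_strlen(name)                 == 4  characters
 * ucs2_strsize(name, sizeof(name))  == 8  bytes, excluding the NUL
 * ucs2_strncmp(name, name, 4)       == 0
 */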
diff --git a/lib/usercopy.c b/lib/usercopy.c
new file mode 100644
index 000000000000..4f5b1ddbcd25
--- /dev/null
+++ b/lib/usercopy.c
@@ -0,0 +1,9 @@
+#include <linux/export.h>
+#include <linux/bug.h>
+#include <linux/uaccess.h>
+
+void copy_from_user_overflow(void)
+{
+ WARN(1, "Buffer overflow detected!\n");
+}
+EXPORT_SYMBOL(copy_from_user_overflow);
diff --git a/lib/uuid.c b/lib/uuid.c
index 52a6fe6387de..398821e4dce1 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -25,13 +25,7 @@
static void __uuid_gen_common(__u8 b[16])
{
- int i;
- u32 r;
-
- for (i = 0; i < 4; i++) {
- r = random32();
- memcpy(b + i * 4, &r, 4);
- }
+ prandom_bytes(b, 16);
/* reversion 0b10 */
b[8] = (b[8] & 0x3F) | 0x80;
}
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0d62fd700f68..e149c6416384 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -534,14 +534,21 @@ char *string(char *buf, char *end, const char *s, struct printf_spec spec)
static noinline_for_stack
char *symbol_string(char *buf, char *end, void *ptr,
- struct printf_spec spec, char ext)
+ struct printf_spec spec, const char *fmt)
{
- unsigned long value = (unsigned long) ptr;
+ unsigned long value;
#ifdef CONFIG_KALLSYMS
char sym[KSYM_SYMBOL_LEN];
- if (ext == 'B')
+#endif
+
+ if (fmt[1] == 'R')
+ ptr = __builtin_extract_return_addr(ptr);
+ value = (unsigned long)ptr;
+
+#ifdef CONFIG_KALLSYMS
+ if (*fmt == 'B')
sprint_backtrace(sym, value);
- else if (ext != 'f' && ext != 's')
+ else if (*fmt != 'f' && *fmt != 's')
sprint_symbol(sym, value);
else
sprint_symbol_no_offset(sym, value);
@@ -987,6 +994,7 @@ int kptr_restrict __read_mostly;
* - 'f' For simple symbolic function names without offset
* - 'S' For symbolic direct pointers with offset
* - 's' For symbolic direct pointers without offset
+ * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
* - 'B' For backtraced symbolic direct pointers with offset
* - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
* - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
@@ -1060,7 +1068,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'S':
case 's':
case 'B':
- return symbol_string(buf, end, ptr, spec, *fmt);
+ return symbol_string(buf, end, ptr, spec, fmt);
case 'R':
case 'r':
return resource_string(buf, end, ptr, spec, fmt);
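Minimal use of the new modifier: the 'R' suffix runs the pointer through __builtin_extract_return_addr() before symbolization, so architectures that decorate return addresses still resolve to the right symbol.

static void foo(void)
{
	printk(KERN_DEBUG "called from %pSR\n",
	       __builtin_return_address(0));
}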
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index 82a04d7ba99e..08837db52d94 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -15,7 +15,7 @@ config XZ_DEC_X86
config XZ_DEC_POWERPC
bool "PowerPC BCJ filter decoder"
- default y if POWERPC
+ default y if PPC
select XZ_DEC_BCJ
config XZ_DEC_IA64