Diffstat (limited to 'lib')
-rw-r--r--  lib/.gitignore                 |    2
-rw-r--r--  lib/Kconfig                    |    5
-rw-r--r--  lib/Kconfig.debug              |   47
-rw-r--r--  lib/Makefile                   |   25
-rw-r--r--  lib/asn1_decoder.c             |  487
-rwxr-xr-x  lib/build_OID_registry        |  209
-rw-r--r--  lib/crc32.c                    |    9
-rw-r--r--  lib/decompress.c               |    9
-rw-r--r--  lib/dma-debug.c                |    5
-rw-r--r--  lib/gcd.c                      |    3
-rw-r--r--  lib/gen_crc32table.c           |    6
-rw-r--r--  lib/genalloc.c                 |   88
-rw-r--r--  lib/idr.c                      |   32
-rw-r--r--  lib/interval_tree.c            |   10
-rw-r--r--  lib/interval_tree_test_main.c  |  105
-rw-r--r--  lib/kasprintf.c                |    2
-rw-r--r--  lib/mpi/Makefile               |    1
-rw-r--r--  lib/mpi/longlong.h             |  138
-rw-r--r--  lib/mpi/mpi-bit.c              |    2
-rw-r--r--  lib/mpi/mpi-cmp.c              |   70
-rw-r--r--  lib/mpi/mpi-pow.c              |    4
-rw-r--r--  lib/mpi/mpicoder.c             |   55
-rw-r--r--  lib/oid_registry.c             |  170
-rw-r--r--  lib/parser.c                   |   10
-rw-r--r--  lib/plist.c                    |    4
-rw-r--r--  lib/prio_tree.c                |  466
-rw-r--r--  lib/rbtree.c                   |  656
-rw-r--r--  lib/rbtree_test.c              |  234
-rw-r--r--  lib/scatterlist.c              |   35
-rw-r--r--  lib/spinlock_debug.c           |   32
-rw-r--r--  lib/vsprintf.c                 |  139
31 files changed, 2002 insertions(+), 1058 deletions(-)
diff --git a/lib/.gitignore b/lib/.gitignore
index 3bef1ea94c99..09aae85418ab 100644
--- a/lib/.gitignore
+++ b/lib/.gitignore
@@ -3,4 +3,4 @@
#
gen_crc32table
crc32table.h
-
+oid_registry_data.c
diff --git a/lib/Kconfig b/lib/Kconfig
index bb94c1ba616a..4b31a46fb307 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -396,4 +396,9 @@ config SIGNATURE
config LIBFDT
bool
+config OID_REGISTRY
+ tristate
+ help
+ Enable the fast-lookup object identifier (OID) registry.
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 35c4565ee8fa..28e9d6c98941 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -196,12 +196,13 @@ config LOCKUP_DETECTOR
thresholds can be controlled through the sysctl watchdog_thresh.
config HARDLOCKUP_DETECTOR
- def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \
- !HAVE_NMI_WATCHDOG
+ def_bool y
+ depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
+ depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
config BOOTPARAM_HARDLOCKUP_PANIC
bool "Panic (Reboot) On Hard Lockups"
- depends on LOCKUP_DETECTOR
+ depends on HARDLOCKUP_DETECTOR
help
Say Y here to enable the kernel to panic on "hard lockups",
which are bugs that cause the kernel to loop in kernel
@@ -212,7 +213,7 @@ config BOOTPARAM_HARDLOCKUP_PANIC
config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
int
- depends on LOCKUP_DETECTOR
+ depends on HARDLOCKUP_DETECTOR
range 0 1
default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
default 1 if BOOTPARAM_HARDLOCKUP_PANIC
@@ -449,12 +450,12 @@ config SLUB_STATS
out which slabs are relevant to a particular load.
Try running: slabinfo -DA
+config HAVE_DEBUG_KMEMLEAK
+ bool
+
config DEBUG_KMEMLEAK
bool "Kernel memory leak detector"
- depends on DEBUG_KERNEL && EXPERIMENTAL && \
- (X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || \
- MICROBLAZE || TILE || ARM64)
-
+ depends on DEBUG_KERNEL && EXPERIMENTAL && HAVE_DEBUG_KMEMLEAK
select DEBUG_FS
select STACKTRACE if STACKTRACE_SUPPORT
select KALLSYMS
@@ -750,12 +751,12 @@ config DEBUG_HIGHMEM
This option enables additional error checking for high memory systems.
Disable for production systems.
+config HAVE_DEBUG_BUGVERBOSE
+ bool
+
config DEBUG_BUGVERBOSE
bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
- depends on BUG
- depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
- FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || \
- TILE || ARM64
+ depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE)
default y
help
Say Y here to make BUG() panics output the file name and line number
@@ -797,6 +798,15 @@ config DEBUG_VM
If unsure, say N.
+config DEBUG_VM_RB
+ bool "Debug VM red-black trees"
+ depends on DEBUG_VM
+ help
+ Enable this to turn on more extended checks in the virtual-memory
+ system that may impact performance.
+
+ If unsure, say N.
+
config DEBUG_VIRTUAL
bool "Debug VM translations"
depends on DEBUG_KERNEL && X86
@@ -1281,6 +1291,19 @@ config LATENCYTOP
source mm/Kconfig.debug
source kernel/trace/Kconfig
+config RBTREE_TEST
+ tristate "Red-Black tree test"
+ depends on m && DEBUG_KERNEL
+ help
+ A benchmark measuring the performance of the rbtree library.
+ Also includes rbtree invariant checks.
+
+config INTERVAL_TREE_TEST
+ tristate "Interval tree test"
+ depends on m && DEBUG_KERNEL
+ help
+ A benchmark measuring the performance of the interval tree library.
+
config PROVIDE_OHCI1394_DMA_INIT
bool "Remote debugging over FireWire early on boot"
depends on PCI && X86
diff --git a/lib/Makefile b/lib/Makefile
index 42d283edc4d3..821a16229111 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -9,7 +9,7 @@ endif
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o timerqueue.o\
- idr.o int_sqrt.o extable.o prio_tree.o \
+ idr.o int_sqrt.o extable.o \
sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o
@@ -140,6 +140,13 @@ $(foreach file, $(libfdt_files), \
$(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
+obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
+obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
+
+interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
+
+obj-$(CONFIG_ASN1) += asn1_decoder.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
@@ -150,3 +157,19 @@ quiet_cmd_crc32 = GEN $@
$(obj)/crc32table.h: $(obj)/gen_crc32table
$(call cmd,crc32)
+
+#
+# Build a fast OID lookup registry from include/linux/oid_registry.h
+#
+obj-$(CONFIG_OID_REGISTRY) += oid_registry.o
+
+$(obj)/oid_registry.c: $(obj)/oid_registry_data.c
+
+$(obj)/oid_registry_data.c: $(srctree)/include/linux/oid_registry.h \
+ $(src)/build_OID_registry
+ $(call cmd,build_OID_registry)
+
+quiet_cmd_build_OID_registry = GEN $@
+ cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@
+
+clean-files += oid_registry_data.c
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
new file mode 100644
index 000000000000..de2c8b5a715b
--- /dev/null
+++ b/lib/asn1_decoder.c
@@ -0,0 +1,487 @@
+/* Decoder for ASN.1 BER/DER/CER encoded bytestream
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/asn1_decoder.h>
+#include <linux/asn1_ber_bytecode.h>
+
+static const unsigned char asn1_op_lengths[ASN1_OP__NR] = {
+ /* OPC TAG JMP ACT */
+ [ASN1_OP_MATCH] = 1 + 1,
+ [ASN1_OP_MATCH_OR_SKIP] = 1 + 1,
+ [ASN1_OP_MATCH_ACT] = 1 + 1 + 1,
+ [ASN1_OP_MATCH_ACT_OR_SKIP] = 1 + 1 + 1,
+ [ASN1_OP_MATCH_JUMP] = 1 + 1 + 1,
+ [ASN1_OP_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1,
+ [ASN1_OP_MATCH_ANY] = 1,
+ [ASN1_OP_MATCH_ANY_ACT] = 1 + 1,
+ [ASN1_OP_COND_MATCH_OR_SKIP] = 1 + 1,
+ [ASN1_OP_COND_MATCH_ACT_OR_SKIP] = 1 + 1 + 1,
+ [ASN1_OP_COND_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1,
+ [ASN1_OP_COND_MATCH_ANY] = 1,
+ [ASN1_OP_COND_MATCH_ANY_ACT] = 1 + 1,
+ [ASN1_OP_COND_FAIL] = 1,
+ [ASN1_OP_COMPLETE] = 1,
+ [ASN1_OP_ACT] = 1 + 1,
+ [ASN1_OP_RETURN] = 1,
+ [ASN1_OP_END_SEQ] = 1,
+ [ASN1_OP_END_SEQ_OF] = 1 + 1,
+ [ASN1_OP_END_SET] = 1,
+ [ASN1_OP_END_SET_OF] = 1 + 1,
+ [ASN1_OP_END_SEQ_ACT] = 1 + 1,
+ [ASN1_OP_END_SEQ_OF_ACT] = 1 + 1 + 1,
+ [ASN1_OP_END_SET_ACT] = 1 + 1,
+ [ASN1_OP_END_SET_OF_ACT] = 1 + 1 + 1,
+};
+
+/*
+ * Find the length of an indefinite length object
+ * @data: The data buffer
+ * @datalen: The end of the innermost containing element in the buffer
+ * @_dp: The data parse cursor (updated before returning)
+ * @_len: Where to return the size of the element.
+ * @_errmsg: Where to return a pointer to an error message on error
+ */
+static int asn1_find_indefinite_length(const unsigned char *data, size_t datalen,
+ size_t *_dp, size_t *_len,
+ const char **_errmsg)
+{
+ unsigned char tag, tmp;
+ size_t dp = *_dp, len, n;
+ int indef_level = 1;
+
+next_tag:
+ if (unlikely(datalen - dp < 2)) {
+ if (datalen == dp)
+ goto missing_eoc;
+ goto data_overrun_error;
+ }
+
+ /* Extract a tag from the data */
+ tag = data[dp++];
+ if (tag == 0) {
+ /* It appears to be an EOC. */
+ if (data[dp++] != 0)
+ goto invalid_eoc;
+ if (--indef_level <= 0) {
+ *_len = dp - *_dp;
+ *_dp = dp;
+ return 0;
+ }
+ goto next_tag;
+ }
+
+ if (unlikely((tag & 0x1f) == 0x1f)) {
+ do {
+ if (unlikely(datalen - dp < 2))
+ goto data_overrun_error;
+ tmp = data[dp++];
+ } while (tmp & 0x80);
+ }
+
+ /* Extract the length */
+ len = data[dp++];
+ if (len <= 0x7f) {
+ dp += len;
+ goto next_tag;
+ }
+
+ if (unlikely(len == 0x80)) {
+ /* Indefinite length */
+ if (unlikely((tag & ASN1_CONS_BIT) == ASN1_PRIM << 5))
+ goto indefinite_len_primitive;
+ indef_level++;
+ goto next_tag;
+ }
+
+ n = len - 0x80;
+ if (unlikely(n > sizeof(size_t) - 1))
+ goto length_too_long;
+ if (unlikely(n > datalen - dp))
+ goto data_overrun_error;
+ for (len = 0; n > 0; n--) {
+ len <<= 8;
+ len |= data[dp++];
+ }
+ dp += len;
+ goto next_tag;
+
+length_too_long:
+ *_errmsg = "Unsupported length";
+ goto error;
+indefinite_len_primitive:
+ *_errmsg = "Indefinite len primitive not permitted";
+ goto error;
+invalid_eoc:
+ *_errmsg = "Invalid length EOC";
+ goto error;
+data_overrun_error:
+ *_errmsg = "Data overrun error";
+ goto error;
+missing_eoc:
+ *_errmsg = "Missing EOC in indefinite len cons";
+error:
+ *_dp = dp;
+ return -1;
+}
+
+/**
+ * asn1_ber_decoder - Decode BER/DER/CER ASN.1 according to pattern
+ * @decoder: The decoder definition (produced by asn1_compiler)
+ * @context: The caller's context (to be passed to the action functions)
+ * @data: The encoded data
+ * @datasize: The size of the encoded data
+ *
+ * Decode BER/DER/CER encoded ASN.1 data according to a bytecode pattern
+ * produced by asn1_compiler. Action functions are called on marked tags to
+ * allow the caller to retrieve significant data.
+ *
+ * LIMITATIONS:
+ *
+ * To keep down the amount of stack used by this function, the following limits
+ * have been imposed:
+ *
+ * (1) This won't handle datalen > 65535 without increasing the size of the
+ * cons stack elements and length_too_long checking.
+ *
+ * (2) The stack of constructed types is 10 deep. If the depth of non-leaf
+ * constructed types exceeds this, the decode will fail.
+ *
+ * (3) The SET type (not the SET OF type) isn't really supported as tracking
+ * what members of the set have been seen is a pain.
+ */
+int asn1_ber_decoder(const struct asn1_decoder *decoder,
+ void *context,
+ const unsigned char *data,
+ size_t datalen)
+{
+ const unsigned char *machine = decoder->machine;
+ const asn1_action_t *actions = decoder->actions;
+ size_t machlen = decoder->machlen;
+ enum asn1_opcode op;
+ unsigned char tag = 0, csp = 0, jsp = 0, optag = 0, hdr = 0;
+ const char *errmsg;
+ size_t pc = 0, dp = 0, tdp = 0, len = 0;
+ int ret;
+
+ unsigned char flags = 0;
+#define FLAG_INDEFINITE_LENGTH 0x01
+#define FLAG_MATCHED 0x02
+#define FLAG_CONS 0x20 /* Corresponds to CONS bit in the opcode tag
+ * - ie. whether or not we are going to parse
+ * a compound type.
+ */
+
+#define NR_CONS_STACK 10
+ unsigned short cons_dp_stack[NR_CONS_STACK];
+ unsigned short cons_datalen_stack[NR_CONS_STACK];
+ unsigned char cons_hdrlen_stack[NR_CONS_STACK];
+#define NR_JUMP_STACK 10
+ unsigned char jump_stack[NR_JUMP_STACK];
+
+ if (datalen > 65535)
+ return -EMSGSIZE;
+
+next_op:
+ pr_debug("next_op: pc=\e[32m%zu\e[m/%zu dp=\e[33m%zu\e[m/%zu C=%d J=%d\n",
+ pc, machlen, dp, datalen, csp, jsp);
+ if (unlikely(pc >= machlen))
+ goto machine_overrun_error;
+ op = machine[pc];
+ if (unlikely(pc + asn1_op_lengths[op] > machlen))
+ goto machine_overrun_error;
+
+ /* If this command is meant to match a tag, then do that before
+ * evaluating the command.
+ */
+ if (op <= ASN1_OP__MATCHES_TAG) {
+ unsigned char tmp;
+
+ /* Skip conditional matches if possible */
+ if ((op & ASN1_OP_MATCH__COND &&
+ flags & FLAG_MATCHED) ||
+ dp == datalen) {
+ pc += asn1_op_lengths[op];
+ goto next_op;
+ }
+
+ flags = 0;
+ hdr = 2;
+
+ /* Extract a tag from the data */
+ if (unlikely(dp >= datalen - 1))
+ goto data_overrun_error;
+ tag = data[dp++];
+ if (unlikely((tag & 0x1f) == 0x1f))
+ goto long_tag_not_supported;
+
+ if (op & ASN1_OP_MATCH__ANY) {
+ pr_debug("- any %02x\n", tag);
+ } else {
+ /* Extract the tag from the machine
+ * - Either CONS or PRIM are permitted in the data if
+ * CONS is not set in the op stream, otherwise CONS
+ * is mandatory.
+ */
+ optag = machine[pc + 1];
+ flags |= optag & FLAG_CONS;
+
+ /* Determine whether the tag matched */
+ tmp = optag ^ tag;
+ tmp &= ~(optag & ASN1_CONS_BIT);
+ pr_debug("- match? %02x %02x %02x\n", tag, optag, tmp);
+ if (tmp != 0) {
+ /* All odd-numbered tags are MATCH_OR_SKIP. */
+ if (op & ASN1_OP_MATCH__SKIP) {
+ pc += asn1_op_lengths[op];
+ dp--;
+ goto next_op;
+ }
+ goto tag_mismatch;
+ }
+ }
+ flags |= FLAG_MATCHED;
+
+ len = data[dp++];
+ if (len > 0x7f) {
+ if (unlikely(len == 0x80)) {
+ /* Indefinite length */
+ if (unlikely(!(tag & ASN1_CONS_BIT)))
+ goto indefinite_len_primitive;
+ flags |= FLAG_INDEFINITE_LENGTH;
+ if (unlikely(2 > datalen - dp))
+ goto data_overrun_error;
+ } else {
+ int n = len - 0x80;
+ if (unlikely(n > 2))
+ goto length_too_long;
+ if (unlikely(dp >= datalen - n))
+ goto data_overrun_error;
+ hdr += n;
+ for (len = 0; n > 0; n--) {
+ len <<= 8;
+ len |= data[dp++];
+ }
+ if (unlikely(len > datalen - dp))
+ goto data_overrun_error;
+ }
+ }
+
+ if (flags & FLAG_CONS) {
+ /* For expected compound forms, we stack the positions
+ * of the start and end of the data.
+ */
+ if (unlikely(csp >= NR_CONS_STACK))
+ goto cons_stack_overflow;
+ cons_dp_stack[csp] = dp;
+ cons_hdrlen_stack[csp] = hdr;
+ if (!(flags & FLAG_INDEFINITE_LENGTH)) {
+ cons_datalen_stack[csp] = datalen;
+ datalen = dp + len;
+ } else {
+ cons_datalen_stack[csp] = 0;
+ }
+ csp++;
+ }
+
+ pr_debug("- TAG: %02x %zu%s\n",
+ tag, len, flags & FLAG_CONS ? " CONS" : "");
+ tdp = dp;
+ }
+
+ /* Decide how to handle the operation */
+ switch (op) {
+ case ASN1_OP_MATCH_ANY_ACT:
+ case ASN1_OP_COND_MATCH_ANY_ACT:
+ ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
+ if (ret < 0)
+ return ret;
+ goto skip_data;
+
+ case ASN1_OP_MATCH_ACT:
+ case ASN1_OP_MATCH_ACT_OR_SKIP:
+ case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
+ ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len);
+ if (ret < 0)
+ return ret;
+ goto skip_data;
+
+ case ASN1_OP_MATCH:
+ case ASN1_OP_MATCH_OR_SKIP:
+ case ASN1_OP_MATCH_ANY:
+ case ASN1_OP_COND_MATCH_OR_SKIP:
+ case ASN1_OP_COND_MATCH_ANY:
+ skip_data:
+ if (!(flags & FLAG_CONS)) {
+ if (flags & FLAG_INDEFINITE_LENGTH) {
+ ret = asn1_find_indefinite_length(
+ data, datalen, &dp, &len, &errmsg);
+ if (ret < 0)
+ goto error;
+ } else {
+ dp += len;
+ }
+ pr_debug("- LEAF: %zu\n", len);
+ }
+ pc += asn1_op_lengths[op];
+ goto next_op;
+
+ case ASN1_OP_MATCH_JUMP:
+ case ASN1_OP_MATCH_JUMP_OR_SKIP:
+ case ASN1_OP_COND_MATCH_JUMP_OR_SKIP:
+ pr_debug("- MATCH_JUMP\n");
+ if (unlikely(jsp == NR_JUMP_STACK))
+ goto jump_stack_overflow;
+ jump_stack[jsp++] = pc + asn1_op_lengths[op];
+ pc = machine[pc + 2];
+ goto next_op;
+
+ case ASN1_OP_COND_FAIL:
+ if (unlikely(!(flags & FLAG_MATCHED)))
+ goto tag_mismatch;
+ pc += asn1_op_lengths[op];
+ goto next_op;
+
+ case ASN1_OP_COMPLETE:
+ if (unlikely(jsp != 0 || csp != 0)) {
+ pr_err("ASN.1 decoder error: Stacks not empty at completion (%u, %u)\n",
+ jsp, csp);
+ return -EBADMSG;
+ }
+ return 0;
+
+ case ASN1_OP_END_SET:
+ case ASN1_OP_END_SET_ACT:
+ if (unlikely(!(flags & FLAG_MATCHED)))
+ goto tag_mismatch;
+ case ASN1_OP_END_SEQ:
+ case ASN1_OP_END_SET_OF:
+ case ASN1_OP_END_SEQ_OF:
+ case ASN1_OP_END_SEQ_ACT:
+ case ASN1_OP_END_SET_OF_ACT:
+ case ASN1_OP_END_SEQ_OF_ACT:
+ if (unlikely(csp <= 0))
+ goto cons_stack_underflow;
+ csp--;
+ tdp = cons_dp_stack[csp];
+ hdr = cons_hdrlen_stack[csp];
+ len = datalen;
+ datalen = cons_datalen_stack[csp];
+ pr_debug("- end cons t=%zu dp=%zu l=%zu/%zu\n",
+ tdp, dp, len, datalen);
+ if (datalen == 0) {
+ /* Indefinite length - check for the EOC. */
+ datalen = len;
+ if (unlikely(datalen - dp < 2))
+ goto data_overrun_error;
+ if (data[dp++] != 0) {
+ if (op & ASN1_OP_END__OF) {
+ dp--;
+ csp++;
+ pc = machine[pc + 1];
+ pr_debug("- continue\n");
+ goto next_op;
+ }
+ goto missing_eoc;
+ }
+ if (data[dp++] != 0)
+ goto invalid_eoc;
+ len = dp - tdp - 2;
+ } else {
+ if (dp < len && (op & ASN1_OP_END__OF)) {
+ datalen = len;
+ csp++;
+ pc = machine[pc + 1];
+ pr_debug("- continue\n");
+ goto next_op;
+ }
+ if (dp != len)
+ goto cons_length_error;
+ len -= tdp;
+ pr_debug("- cons len l=%zu d=%zu\n", len, dp - tdp);
+ }
+
+ if (op & ASN1_OP_END__ACT) {
+ unsigned char act;
+ if (op & ASN1_OP_END__OF)
+ act = machine[pc + 2];
+ else
+ act = machine[pc + 1];
+ ret = actions[act](context, hdr, 0, data + tdp, len);
+ if (ret < 0)
+ return ret;
+ }
+ pc += asn1_op_lengths[op];
+ goto next_op;
+
+ case ASN1_OP_ACT:
+ ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len);
+ pc += asn1_op_lengths[op];
+ goto next_op;
+
+ case ASN1_OP_RETURN:
+ if (unlikely(jsp <= 0))
+ goto jump_stack_underflow;
+ pc = jump_stack[--jsp];
+ goto next_op;
+
+ default:
+ break;
+ }
+
+ /* Shouldn't reach here */
+ pr_err("ASN.1 decoder error: Found reserved opcode (%u)\n", op);
+ return -EBADMSG;
+
+data_overrun_error:
+ errmsg = "Data overrun error";
+ goto error;
+machine_overrun_error:
+ errmsg = "Machine overrun error";
+ goto error;
+jump_stack_underflow:
+ errmsg = "Jump stack underflow";
+ goto error;
+jump_stack_overflow:
+ errmsg = "Jump stack overflow";
+ goto error;
+cons_stack_underflow:
+ errmsg = "Cons stack underflow";
+ goto error;
+cons_stack_overflow:
+ errmsg = "Cons stack overflow";
+ goto error;
+cons_length_error:
+ errmsg = "Cons length error";
+ goto error;
+missing_eoc:
+ errmsg = "Missing EOC in indefinite len cons";
+ goto error;
+invalid_eoc:
+ errmsg = "Invalid length EOC";
+ goto error;
+length_too_long:
+ errmsg = "Unsupported length";
+ goto error;
+indefinite_len_primitive:
+ errmsg = "Indefinite len primitive not permitted";
+ goto error;
+tag_mismatch:
+ errmsg = "Unexpected tag";
+ goto error;
+long_tag_not_supported:
+ errmsg = "Long tag not supported";
+error:
+ pr_debug("\nASN1: %s [m=%zu d=%zu ot=%02x t=%02x l=%zu]\n",
+ errmsg, pc, dp, optag, tag, len);
+ return -EBADMSG;
+}
+EXPORT_SYMBOL_GPL(asn1_ber_decoder);
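The hot path of the decoder above is BER header parsing: one identifier octet,
then a length octet that selects the short form (0x00-0x7f), the long form
(0x81 and up, followed by big-endian length octets), or the indefinite marker
(0x80). The fragment below is a minimal standalone C sketch of that header
walk, not kernel code; the function name and return convention are invented
for illustration.

#include <stddef.h>
#include <stdint.h>

/* Returns the header size, 0 for an indefinite length, -1 on bad input. */
static int ber_read_header(const uint8_t *data, size_t datalen,
			   uint8_t *tag, size_t *len)
{
	size_t dp = 0, n;

	if (datalen < 2)
		return -1;
	*tag = data[dp++];
	if ((*tag & 0x1f) == 0x1f)	/* multi-octet tags unsupported, as above */
		return -1;

	n = data[dp++];
	if (n <= 0x7f) {		/* short form: length fits in one octet */
		*len = n;
		return dp;
	}
	if (n == 0x80)			/* indefinite: terminated by 00 00 EOC */
		return 0;
	n -= 0x80;			/* long form: n length octets follow */
	if (n > sizeof(size_t) - 1 || n > datalen - dp)
		return -1;
	for (*len = 0; n > 0; n--)
		*len = (*len << 8) | data[dp++];
	return dp;
}

An indefinite-length element must be a constructed type and runs until a
00 00 EOC pair, which is exactly what asn1_find_indefinite_length() above
scans for.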
diff --git a/lib/build_OID_registry b/lib/build_OID_registry
new file mode 100755
index 000000000000..dfbdaab81bc8
--- /dev/null
+++ b/lib/build_OID_registry
@@ -0,0 +1,209 @@
+#!/usr/bin/perl -w
+#
+# Build a static ASN.1 Object Identifier (OID) registry
+#
+# Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+# Written by David Howells (dhowells@redhat.com)
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public Licence
+# as published by the Free Software Foundation; either version
+# 2 of the Licence, or (at your option) any later version.
+#
+
+use strict;
+
+my @names = ();
+my @oids = ();
+
+if ($#ARGV != 1) {
+ print STDERR "Format: ", $0, " <in-h-file> <out-c-file>\n";
+ exit(2);
+}
+
+#
+# Open the file to read from
+#
+open IN_FILE, "<$ARGV[0]" or die;
+while (<IN_FILE>) {
+ chomp;
+ if (m!\s+OID_([a-zA-Z][a-zA-Z0-9_]+),\s+/[*]\s+([012][.0-9]*)\s+[*]/!) {
+ push @names, $1;
+ push @oids, $2;
+ }
+}
+close IN_FILE || die;
+
+#
+# Open the files to write into
+#
+open C_FILE, ">$ARGV[1]" or die;
+print C_FILE "/*\n";
+print C_FILE " * Automatically generated by ", $0, ". Do not edit\n";
+print C_FILE " */\n";
+
+#
+# Split the data up into separate lists and also determine the lengths of the
+# encoded data arrays.
+#
+my @indices = ();
+my @lengths = ();
+my $total_length = 0;
+
+print "Compiling ", $#names + 1, " OIDs\n";
+
+for (my $i = 0; $i <= $#names; $i++) {
+ my $name = $names[$i];
+ my $oid = $oids[$i];
+
+ my @components = split(/[.]/, $oid);
+
+ # Determine the encoded length of this OID
+ my $size = $#components;
+ for (my $loop = 2; $loop <= $#components; $loop++) {
+ my $c = $components[$loop];
+
+ # We will base128 encode the number
+ my $tmp = ($c == 0) ? 0 : int(log($c)/log(2));
+ $tmp = int($tmp / 7);
+ $size += $tmp;
+ }
+ push @lengths, $size;
+ push @indices, $total_length;
+ $total_length += $size;
+}
+
+#
+# Emit the look-up-by-OID index table
+#
+print C_FILE "\n";
+if ($total_length <= 255) {
+ print C_FILE "static const unsigned char oid_index[OID__NR + 1] = {\n";
+} else {
+ print C_FILE "static const unsigned short oid_index[OID__NR + 1] = {\n";
+}
+for (my $i = 0; $i <= $#names; $i++) {
+ print C_FILE "\t[OID_", $names[$i], "] = ", $indices[$i], ",\n"
+}
+print C_FILE "\t[OID__NR] = ", $total_length, "\n";
+print C_FILE "};\n";
+
+#
+# Encode the OIDs
+#
+my @encoded_oids = ();
+
+for (my $i = 0; $i <= $#names; $i++) {
+ my @octets = ();
+
+ my @components = split(/[.]/, $oids[$i]);
+
+ push @octets, $components[0] * 40 + $components[1];
+
+ for (my $loop = 2; $loop <= $#components; $loop++) {
+ my $c = $components[$loop];
+
+ # Base128 encode the number
+ my $tmp = ($c == 0) ? 0 : int(log($c)/log(2));
+ $tmp = int($tmp / 7);
+
+ for (; $tmp > 0; $tmp--) {
+ push @octets, (($c >> $tmp * 7) & 0x7f) | 0x80;
+ }
+ push @octets, $c & 0x7f;
+ }
+
+ push @encoded_oids, \@octets;
+}
+
+#
+# Create a hash value for each OID
+#
+my @hash_values = ();
+for (my $i = 0; $i <= $#names; $i++) {
+ my @octets = @{$encoded_oids[$i]};
+
+ my $hash = $#octets;
+ foreach (@octets) {
+ $hash += $_ * 33;
+ }
+
+ $hash = ($hash >> 24) ^ ($hash >> 16) ^ ($hash >> 8) ^ ($hash);
+
+ push @hash_values, $hash & 0xff;
+}
+
+#
+# Emit the OID data
+#
+print C_FILE "\n";
+print C_FILE "static const unsigned char oid_data[", $total_length, "] = {\n";
+for (my $i = 0; $i <= $#names; $i++) {
+ my @octets = @{$encoded_oids[$i]};
+ print C_FILE "\t";
+ print C_FILE $_, ", " foreach (@octets);
+ print C_FILE "\t// ", $names[$i];
+ print C_FILE "\n";
+}
+print C_FILE "};\n";
+
+#
+# Build the search index table (ordered by length then hash then content)
+#
+my @index_table = ( 0 .. $#names );
+
+@index_table = sort {
+ my @octets_a = @{$encoded_oids[$a]};
+ my @octets_b = @{$encoded_oids[$b]};
+
+ return $hash_values[$a] <=> $hash_values[$b]
+ if ($hash_values[$a] != $hash_values[$b]);
+ return $#octets_a <=> $#octets_b
+ if ($#octets_a != $#octets_b);
+ for (my $i = $#octets_a; $i >= 0; $i--) {
+ return $octets_a[$i] <=> $octets_b[$i]
+ if ($octets_a[$i] != $octets_b[$i]);
+ }
+ return 0;
+
+} @index_table;
+
+#
+# Emit the search index and hash value table
+#
+print C_FILE "\n";
+print C_FILE "static const struct {\n";
+print C_FILE "\tunsigned char hash;\n";
+if ($#names <= 255) {
+ print C_FILE "\tenum OID oid : 8;\n";
+} else {
+ print C_FILE "\tenum OID oid : 16;\n";
+}
+print C_FILE "} oid_search_table[OID__NR] = {\n";
+for (my $i = 0; $i <= $#names; $i++) {
+ my @octets = @{$encoded_oids[$index_table[$i]]};
+ printf(C_FILE "\t[%3u] = { %3u, OID_%-35s }, // ",
+ $i,
+ $hash_values[$index_table[$i]],
+ $names[$index_table[$i]]);
+ printf C_FILE "%02x", $_ foreach (@octets);
+ print C_FILE "\n";
+}
+print C_FILE "};\n";
+
+#
+# Emit the OID debugging name table
+#
+#print C_FILE "\n";
+#print C_FILE "const char *const oid_name_table[OID__NR + 1] = {\n";
+#
+#for (my $i = 0; $i <= $#names; $i++) {
+# print C_FILE "\t\"", $names[$i], "\",\n"
+#}
+#print C_FILE "\t\"Unknown-OID\"\n";
+#print C_FILE "};\n";
+
+#
+# Polish off
+#
+close C_FILE or die;
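For readers who prefer C to Perl, this is a sketch of the OID content
encoding the script emits: the first two components pack into a single octet
as 40*a + b, and every later component is base-128 encoded, most significant
group first, with bit 7 set on each octet except the last. The function name
and the caller-supplied output buffer are hypothetical.

#include <stddef.h>
#include <stdint.h>

static size_t oid_encode(const unsigned int *parts, size_t nparts,
			 uint8_t *out)
{
	size_t n = 0, i;

	out[n++] = parts[0] * 40 + parts[1];
	for (i = 2; i < nparts; i++) {
		unsigned int c = parts[i];
		int shift = 0;

		while (c >> (shift + 7))	/* find the top 7-bit group */
			shift += 7;
		for (; shift > 0; shift -= 7)	/* continuation octets */
			out[n++] = ((c >> shift) & 0x7f) | 0x80;
		out[n++] = c & 0x7f;		/* final octet, bit 7 clear */
	}
	return n;
}

For example, 2.5.4.3 (commonName) encodes to the three octets
0x55 0x04 0x03, matching the tables the script generates.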
diff --git a/lib/crc32.c b/lib/crc32.c
index 61774b8db4de..072fbd8234d5 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -188,11 +188,13 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
#else
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len, crc32table_le, CRCPOLY_LE);
+ return crc32_le_generic(crc, p, len,
+ (const u32 (*)[256])crc32table_le, CRCPOLY_LE);
}
u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE);
+ return crc32_le_generic(crc, p, len,
+ (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
}
#endif
EXPORT_SYMBOL(crc32_le);
@@ -253,7 +255,8 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
#else
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_be_generic(crc, p, len, crc32table_be, CRCPOLY_BE);
+ return crc32_be_generic(crc, p, len,
+ (const u32 (*)[256])crc32table_be, CRCPOLY_BE);
}
#endif
EXPORT_SYMBOL(crc32_be);
diff --git a/lib/decompress.c b/lib/decompress.c
index 3d766b7f60ab..31a804277282 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/init.h>
#ifndef CONFIG_DECOMPRESS_GZIP
# define gunzip NULL
@@ -31,11 +32,13 @@
# define unlzo NULL
#endif
-static const struct compress_format {
+struct compress_format {
unsigned char magic[2];
const char *name;
decompress_fn decompressor;
-} compressed_formats[] = {
+};
+
+static const struct compress_format compressed_formats[] __initdata = {
{ {037, 0213}, "gzip", gunzip },
{ {037, 0236}, "gzip", gunzip },
{ {0x42, 0x5a}, "bzip2", bunzip2 },
@@ -45,7 +48,7 @@ static const struct compress_format {
{ {0, 0}, NULL, NULL }
};
-decompress_fn decompress_method(const unsigned char *inbuf, int len,
+decompress_fn __init decompress_method(const unsigned char *inbuf, int len,
const char **name)
{
const struct compress_format *cf;
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 66ce41489133..b9087bff008b 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -120,11 +120,6 @@ static const char *type2name[4] = { "single", "page",
static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
"DMA_FROM_DEVICE", "DMA_NONE" };
-/* little merge helper - remove it after the merge window */
-#ifndef BUS_NOTIFY_UNBOUND_DRIVER
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
-#endif
-
/*
* The access to some variables in this macro is racy. We can't use atomic_t
* here because all these variables are exported to debugfs. Some of them even
diff --git a/lib/gcd.c b/lib/gcd.c
index cce4f3cd14b3..3657f129d7b8 100644
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b)
if (a < b)
swap(a, b);
+
+ if (!b)
+ return a;
while ((r = a % b) != 0) {
a = b;
b = r;
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index 8f8d5439e2d9..71fcfcd96410 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -109,7 +109,7 @@ int main(int argc, char** argv)
if (CRC_LE_BITS > 1) {
crc32init_le();
- printf("static const u32 __cacheline_aligned "
+ printf("static u32 __cacheline_aligned "
"crc32table_le[%d][%d] = {",
LE_TABLE_ROWS, LE_TABLE_SIZE);
output_table(crc32table_le, LE_TABLE_ROWS,
@@ -119,7 +119,7 @@ int main(int argc, char** argv)
if (CRC_BE_BITS > 1) {
crc32init_be();
- printf("static const u32 __cacheline_aligned "
+ printf("static u32 __cacheline_aligned "
"crc32table_be[%d][%d] = {",
BE_TABLE_ROWS, BE_TABLE_SIZE);
output_table(crc32table_be, LE_TABLE_ROWS,
@@ -128,7 +128,7 @@ int main(int argc, char** argv)
}
if (CRC_LE_BITS > 1) {
crc32cinit_le();
- printf("static const u32 __cacheline_aligned "
+ printf("static u32 __cacheline_aligned "
"crc32ctable_le[%d][%d] = {",
LE_TABLE_ROWS, LE_TABLE_SIZE);
output_table(crc32ctable_le, LE_TABLE_ROWS,
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 6bc04aab6ec7..ca208a92628c 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -152,6 +152,8 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->chunks);
pool->min_alloc_order = min_alloc_order;
+ pool->algo = gen_pool_first_fit;
+ pool->data = NULL;
}
return pool;
}
@@ -255,8 +257,9 @@ EXPORT_SYMBOL(gen_pool_destroy);
* @size: number of bytes to allocate from the pool
*
* Allocate the requested number of bytes from the specified pool.
- * Uses a first-fit algorithm. Can not be used in NMI handler on
- * architectures without NMI-safe cmpxchg implementation.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Cannot be used in an NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
*/
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
@@ -280,8 +283,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
end_bit = (chunk->end_addr - chunk->start_addr) >> order;
retry:
- start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
- start_bit, nbits, 0);
+ start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
+ pool->data);
if (start_bit >= end_bit)
continue;
remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -400,3 +403,80 @@ size_t gen_pool_size(struct gen_pool *pool)
return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
+
+/**
+ * gen_pool_set_algo - set the allocation algorithm
+ * @pool: pool to change allocation algorithm
+ * @algo: custom algorithm function
+ * @data: additional data used by @algo
+ *
+ * Call @algo for each memory allocation in the pool.
+ * If @algo is NULL use gen_pool_first_fit as default
+ * memory allocation function.
+ */
+void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
+{
+ rcu_read_lock();
+
+ pool->algo = algo;
+ if (!pool->algo)
+ pool->algo = gen_pool_first_fit;
+
+ pool->data = data;
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(gen_pool_set_algo);
+
+/**
+ * gen_pool_first_fit - find the first available region
+ * of memory matching the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ */
+unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr, void *data)
+{
+ return bitmap_find_next_zero_area(map, size, start, nr, 0);
+}
+EXPORT_SYMBOL(gen_pool_first_fit);
+
+/**
+ * gen_pool_best_fit - find the best fitting region of memory
+ * matching the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ *
+ * Iterate over the bitmap to find the smallest free region
+ * that can satisfy the allocation.
+ */
+unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr, void *data)
+{
+ unsigned long start_bit = size;
+ unsigned long len = size + 1;
+ unsigned long index;
+
+ index = bitmap_find_next_zero_area(map, size, start, nr, 0);
+
+ while (index < size) {
+ int next_bit = find_next_bit(map, size, index + nr);
+ if ((next_bit - index) < len) {
+ len = next_bit - index;
+ start_bit = index;
+ if (len == nr)
+ return start_bit;
+ }
+ index = bitmap_find_next_zero_area(map, size,
+ next_bit + 1, nr, 0);
+ }
+
+ return start_bit;
+}
+EXPORT_SYMBOL(gen_pool_best_fit);
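A hedged usage sketch of the new hook: create a pool, hand it some backing
memory, switch from the default first-fit search to the best-fit search
added above, and allocate. The base address and sizes here are made up for
illustration.

#include <linux/genalloc.h>
#include <linux/errno.h>

static int example_pool_setup(void)
{
	struct gen_pool *pool;

	pool = gen_pool_create(4, -1);	/* 16-byte granules, any NUMA node */
	if (!pool)
		return -ENOMEM;
	/* Hypothetical 1 MiB region at 0x20000000 */
	if (gen_pool_add(pool, 0x20000000, 1 << 20, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}
	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);

	return gen_pool_alloc(pool, 256) ? 0 : -ENOMEM;
}

Passing NULL as the algorithm to gen_pool_set_algo() restores the first-fit
default, per the kernel-doc above.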
diff --git a/lib/idr.c b/lib/idr.c
index 4046e29c0a99..648239079dd2 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -20,7 +20,7 @@
* that id to this code and it returns your pointer.
* You can release ids at any time. When all ids are released, most of
- * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
+ * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
* don't need to go to the memory "store" during an id allocate, just
* so you don't need to be too concerned about locking and conflicts
* with the slab allocator.
@@ -122,7 +122,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
*/
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
- while (idp->id_free_cnt < IDR_FREE_MAX) {
+ while (idp->id_free_cnt < MAX_IDR_FREE) {
struct idr_layer *new;
new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
if (new == NULL)
@@ -179,7 +179,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
sh = IDR_BITS*l;
id = ((id >> sh) ^ n ^ m) << sh;
}
- if ((id >= MAX_ID_BIT) || (id < 0))
+ if ((id >= MAX_IDR_BIT) || (id < 0))
return IDR_NOMORE_SPACE;
if (l == 0)
break;
@@ -223,7 +223,7 @@ build_up:
* Add a new layer to the top of the tree if the requested
* id is larger than the currently allocated space.
*/
- while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
+ while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
layers++;
if (!p->count) {
/* special case: if the tree is currently empty,
@@ -265,7 +265,7 @@ build_up:
static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
int id;
id = idr_get_empty_slot(idp, starting_id, pa);
@@ -357,7 +357,7 @@ static void idr_remove_warning(int id)
static void sub_remove(struct idr *idp, int shift, int id)
{
struct idr_layer *p = idp->top;
- struct idr_layer **pa[MAX_LEVEL];
+ struct idr_layer **pa[MAX_IDR_LEVEL];
struct idr_layer ***paa = &pa[0];
struct idr_layer *to_free;
int n;
@@ -402,7 +402,7 @@ void idr_remove(struct idr *idp, int id)
struct idr_layer *to_free;
/* Mask off upper bits we don't use for the search. */
- id &= MAX_ID_MASK;
+ id &= MAX_IDR_MASK;
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
@@ -420,7 +420,7 @@ void idr_remove(struct idr *idp, int id)
to_free->bitmap = to_free->count = 0;
free_layer(to_free);
}
- while (idp->id_free_cnt >= IDR_FREE_MAX) {
+ while (idp->id_free_cnt >= MAX_IDR_FREE) {
p = get_from_free_list(idp);
/*
* Note: we don't call the rcu callback here, since the only
@@ -451,7 +451,7 @@ void idr_remove_all(struct idr *idp)
int n, id, max;
int bt_mask;
struct idr_layer *p;
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
@@ -517,7 +517,7 @@ void *idr_find(struct idr *idp, int id)
n = (p->layer+1) * IDR_BITS;
/* Mask off upper bits we don't use for the search. */
- id &= MAX_ID_MASK;
+ id &= MAX_IDR_MASK;
if (id >= (1 << n))
return NULL;
@@ -555,7 +555,7 @@ int idr_for_each(struct idr *idp,
{
int n, id, max, error = 0;
struct idr_layer *p;
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(idr_for_each);
*/
void *idr_get_next(struct idr *idp, int *nextidp)
{
- struct idr_layer *p, *pa[MAX_LEVEL];
+ struct idr_layer *p, *pa[MAX_IDR_LEVEL];
struct idr_layer **paa = &pa[0];
int id = *nextidp;
int n, max;
@@ -659,7 +659,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
n = (p->layer+1) * IDR_BITS;
- id &= MAX_ID_MASK;
+ id &= MAX_IDR_MASK;
if (id >= (1 << n))
return ERR_PTR(-EINVAL);
@@ -780,7 +780,7 @@ EXPORT_SYMBOL(ida_pre_get);
*/
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
struct ida_bitmap *bitmap;
unsigned long flags;
int idr_id = starting_id / IDA_BITMAP_BITS;
@@ -793,7 +793,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
if (t < 0)
return _idr_rc_to_errno(t);
- if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
+ if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
return -ENOSPC;
if (t != idr_id)
@@ -827,7 +827,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
}
id = idr_id * IDA_BITMAP_BITS + t;
- if (id >= MAX_ID_BIT)
+ if (id >= MAX_IDR_BIT)
return -ENOSPC;
__set_bit(t, bitmap->bitmap);
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
new file mode 100644
index 000000000000..e6eb406f2d65
--- /dev/null
+++ b/lib/interval_tree.c
@@ -0,0 +1,10 @@
+#include <linux/init.h>
+#include <linux/interval_tree.h>
+#include <linux/interval_tree_generic.h>
+
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+
+INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
+ unsigned long, __subtree_last,
+ START, LAST,, interval_tree)
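lib/interval_tree.c above is just one instantiation of the template. As a
hedged sketch, any structure embedding an rb_node plus inclusive start/last
fields can get its own augmented tree the same way; everything below is
hypothetical user code, not part of this patch.

#include <linux/interval_tree_generic.h>
#include <linux/rbtree.h>

struct my_range {
	struct rb_node rb;
	unsigned long start;		/* first value in the interval */
	unsigned long last;		/* last value, inclusive */
	unsigned long __subtree_last;	/* maintained by the generated code */
};

#define MY_START(n) ((n)->start)
#define MY_LAST(n)  ((n)->last)

INTERVAL_TREE_DEFINE(struct my_range, rb, unsigned long, __subtree_last,
		     MY_START, MY_LAST, static, my_range)

This generates static my_range_insert(), my_range_remove(),
my_range_iter_first() and my_range_iter_next(), mirroring the exported
interval_tree_*() functions defined above.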
diff --git a/lib/interval_tree_test_main.c b/lib/interval_tree_test_main.c
new file mode 100644
index 000000000000..b25903987f7a
--- /dev/null
+++ b/lib/interval_tree_test_main.c
@@ -0,0 +1,105 @@
+#include <linux/module.h>
+#include <linux/interval_tree.h>
+#include <linux/random.h>
+#include <asm/timex.h>
+
+#define NODES 100
+#define PERF_LOOPS 100000
+#define SEARCHES 100
+#define SEARCH_LOOPS 10000
+
+static struct rb_root root = RB_ROOT;
+static struct interval_tree_node nodes[NODES];
+static u32 queries[SEARCHES];
+
+static struct rnd_state rnd;
+
+static inline unsigned long
+search(unsigned long query, struct rb_root *root)
+{
+ struct interval_tree_node *node;
+ unsigned long results = 0;
+
+ for (node = interval_tree_iter_first(root, query, query); node;
+ node = interval_tree_iter_next(node, query, query))
+ results++;
+ return results;
+}
+
+static void init(void)
+{
+ int i;
+ for (i = 0; i < NODES; i++) {
+ u32 a = prandom32(&rnd), b = prandom32(&rnd);
+ if (a <= b) {
+ nodes[i].start = a;
+ nodes[i].last = b;
+ } else {
+ nodes[i].start = b;
+ nodes[i].last = a;
+ }
+ }
+ for (i = 0; i < SEARCHES; i++)
+ queries[i] = prandom32(&rnd);
+}
+
+static int interval_tree_test_init(void)
+{
+ int i, j;
+ unsigned long results;
+ cycles_t time1, time2, time;
+
+ printk(KERN_ALERT "interval tree insert/remove");
+
+ prandom32_seed(&rnd, 3141592653589793238ULL);
+ init();
+
+ time1 = get_cycles();
+
+ for (i = 0; i < PERF_LOOPS; i++) {
+ for (j = 0; j < NODES; j++)
+ interval_tree_insert(nodes + j, &root);
+ for (j = 0; j < NODES; j++)
+ interval_tree_remove(nodes + j, &root);
+ }
+
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, PERF_LOOPS);
+ printk(" -> %llu cycles\n", (unsigned long long)time);
+
+ printk(KERN_ALERT "interval tree search");
+
+ for (j = 0; j < NODES; j++)
+ interval_tree_insert(nodes + j, &root);
+
+ time1 = get_cycles();
+
+ results = 0;
+ for (i = 0; i < SEARCH_LOOPS; i++)
+ for (j = 0; j < SEARCHES; j++)
+ results += search(queries[j], &root);
+
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, SEARCH_LOOPS);
+ results = div_u64(results, SEARCH_LOOPS);
+ printk(" -> %llu cycles (%lu results)\n",
+ (unsigned long long)time, results);
+
+ return -EAGAIN; /* Failure will directly unload the module */
+}
+
+static void interval_tree_test_exit(void)
+{
+ printk(KERN_ALERT "test exit\n");
+}
+
+module_init(interval_tree_test_init)
+module_exit(interval_tree_test_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michel Lespinasse");
+MODULE_DESCRIPTION("Interval Tree test");
diff --git a/lib/kasprintf.c b/lib/kasprintf.c
index ae0de80c1c88..32f12150fc4f 100644
--- a/lib/kasprintf.c
+++ b/lib/kasprintf.c
@@ -21,7 +21,7 @@ char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
len = vsnprintf(NULL, 0, fmt, aq);
va_end(aq);
- p = kmalloc(len+1, gfp);
+ p = kmalloc_track_caller(len+1, gfp);
if (!p)
return NULL;
diff --git a/lib/mpi/Makefile b/lib/mpi/Makefile
index 45ca90a8639c..019a68c90144 100644
--- a/lib/mpi/Makefile
+++ b/lib/mpi/Makefile
@@ -14,6 +14,7 @@ mpi-y = \
generic_mpih-add1.o \
mpicoder.o \
mpi-bit.o \
+ mpi-cmp.o \
mpih-cmp.o \
mpih-div.o \
mpih-mul.o \
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 29f98624ef93..678ce4f1e124 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -19,6 +19,8 @@
* the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
* MA 02111-1307, USA. */
+#include <asm-generic/bitops/count_zeros.h>
+
/* You have to define the following before including this file:
*
* UWtype -- An unsigned type, default type for operations (typically a "word")
@@ -146,12 +148,6 @@ do { \
: "1" ((USItype)(n1)), \
"r" ((USItype)(n0)), \
"r" ((USItype)(d)))
-
-#define count_leading_zeros(count, x) \
- __asm__ ("clz %0,%1" \
- : "=r" ((USItype)(count)) \
- : "r" ((USItype)(x)))
-#define COUNT_LEADING_ZEROS_0 32
#endif /* __a29k__ */
#if defined(__alpha) && W_TYPE_SIZE == 64
@@ -298,11 +294,6 @@ extern UDItype __udiv_qrnnd();
: "1" ((USItype)(nh)), \
"0" ((USItype)(nl)), \
"g" ((USItype)(d)))
-#define count_leading_zeros(count, x) \
- __asm__ ("bsch/1 %1,%0" \
- : "=g" (count) \
- : "g" ((USItype)(x)), \
- "0" ((USItype)0))
#endif
/***************************************
@@ -354,27 +345,6 @@ do { USItype __r; \
} while (0)
extern USItype __udiv_qrnnd();
#endif /* LONGLONG_STANDALONE */
-#define count_leading_zeros(count, x) \
-do { \
- USItype __tmp; \
- __asm__ ( \
- "ldi 1,%0\n" \
- "extru,= %1,15,16,%%r0 ; Bits 31..16 zero?\n" \
- "extru,tr %1,15,16,%1 ; No. Shift down, skip add.\n" \
- "ldo 16(%0),%0 ; Yes. Perform add.\n" \
- "extru,= %1,23,8,%%r0 ; Bits 15..8 zero?\n" \
- "extru,tr %1,23,8,%1 ; No. Shift down, skip add.\n" \
- "ldo 8(%0),%0 ; Yes. Perform add.\n" \
- "extru,= %1,27,4,%%r0 ; Bits 7..4 zero?\n" \
- "extru,tr %1,27,4,%1 ; No. Shift down, skip add.\n" \
- "ldo 4(%0),%0 ; Yes. Perform add.\n" \
- "extru,= %1,29,2,%%r0 ; Bits 3..2 zero?\n" \
- "extru,tr %1,29,2,%1 ; No. Shift down, skip add.\n" \
- "ldo 2(%0),%0 ; Yes. Perform add.\n" \
- "extru %1,30,1,%1 ; Extract bit 1.\n" \
- "sub %0,%1,%0 ; Subtract it. " \
- : "=r" (count), "=r" (__tmp) : "1" (x)); \
-} while (0)
#endif /* hppa */
/***************************************
@@ -457,15 +427,6 @@ do { \
: "0" ((USItype)(n0)), \
"1" ((USItype)(n1)), \
"rm" ((USItype)(d)))
-#define count_leading_zeros(count, x) \
-do { \
- USItype __cbtmp; \
- __asm__ ("bsrl %1,%0" \
- : "=r" (__cbtmp) : "rm" ((USItype)(x))); \
- (count) = __cbtmp ^ 31; \
-} while (0)
-#define count_trailing_zeros(count, x) \
- __asm__ ("bsfl %1,%0" : "=r" (count) : "rm" ((USItype)(x)))
#ifndef UMUL_TIME
#define UMUL_TIME 40
#endif
@@ -536,15 +497,6 @@ do { \
"dI" ((USItype)(d))); \
(r) = __rq.__i.__l; (q) = __rq.__i.__h; \
} while (0)
-#define count_leading_zeros(count, x) \
-do { \
- USItype __cbtmp; \
- __asm__ ("scanbit %1,%0" \
- : "=r" (__cbtmp) \
- : "r" ((USItype)(x))); \
- (count) = __cbtmp ^ 31; \
-} while (0)
-#define COUNT_LEADING_ZEROS_0 (-32) /* sic */
#if defined(__i960mx) /* what is the proper symbol to test??? */
#define rshift_rhlc(r, h, l, c) \
do { \
@@ -603,11 +555,6 @@ do { \
: "0" ((USItype)(n0)), \
"1" ((USItype)(n1)), \
"dmi" ((USItype)(d)))
-#define count_leading_zeros(count, x) \
- __asm__ ("bfffo %1{%b2:%b2},%0" \
- : "=d" ((USItype)(count)) \
- : "od" ((USItype)(x)), "n" (0))
-#define COUNT_LEADING_ZEROS_0 32
#else /* not mc68020 */
#define umul_ppmm(xh, xl, a, b) \
do { USItype __umul_tmp1, __umul_tmp2; \
@@ -664,15 +611,6 @@ do { USItype __umul_tmp1, __umul_tmp2; \
"rJ" ((USItype)(bh)), \
"rJ" ((USItype)(al)), \
"rJ" ((USItype)(bl)))
-#define count_leading_zeros(count, x) \
-do { \
- USItype __cbtmp; \
- __asm__ ("ff1 %0,%1" \
- : "=r" (__cbtmp) \
- : "r" ((USItype)(x))); \
- (count) = __cbtmp ^ 31; \
-} while (0)
-#define COUNT_LEADING_ZEROS_0 63 /* sic */
#if defined(__m88110__)
#define umul_ppmm(wh, wl, u, v) \
do { \
@@ -779,12 +717,6 @@ do { \
: "0" (__xx.__ll), \
"g" ((USItype)(d))); \
(r) = __xx.__i.__l; (q) = __xx.__i.__h; })
-#define count_trailing_zeros(count, x) \
-do { \
- __asm__("ffsd %2,%0" \
- : "=r"((USItype) (count)) \
- : "0"((USItype) 0), "r"((USItype) (x))); \
- } while (0)
#endif /* __ns32000__ */
/***************************************
@@ -855,11 +787,6 @@ do { \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
} while (0)
-#define count_leading_zeros(count, x) \
- __asm__ ("{cntlz|cntlzw} %0,%1" \
- : "=r" ((USItype)(count)) \
- : "r" ((USItype)(x)))
-#define COUNT_LEADING_ZEROS_0 32
#if defined(_ARCH_PPC)
#define umul_ppmm(ph, pl, m0, m1) \
do { \
@@ -1001,19 +928,6 @@ do { \
} while (0)
#define UMUL_TIME 20
#define UDIV_TIME 200
-#define count_leading_zeros(count, x) \
-do { \
- if ((x) >= 0x10000) \
- __asm__ ("clz %0,%1" \
- : "=r" ((USItype)(count)) \
- : "r" ((USItype)(x) >> 16)); \
- else { \
- __asm__ ("clz %0,%1" \
- : "=r" ((USItype)(count)) \
- : "r" ((USItype)(x))); \
- (count) += 16; \
- } \
-} while (0)
#endif /* RT/ROMP */
/***************************************
@@ -1142,13 +1056,6 @@ do { \
"rI" ((USItype)(d)) \
: "%g1" __AND_CLOBBER_CC)
#define UDIV_TIME 37
-#define count_leading_zeros(count, x) \
- __asm__ ("scan %1,0,%0" \
- : "=r" ((USItype)(x)) \
- : "r" ((USItype)(count)))
-/* Early sparclites return 63 for an argument of 0, but they warn that future
- implementations might change this. Therefore, leave COUNT_LEADING_ZEROS_0
- undefined. */
#endif /* __sparclite__ */
#endif /* __sparc_v8__ */
/* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd. */
@@ -1454,47 +1361,6 @@ do { \
#define udiv_qrnnd __udiv_qrnnd_c
#endif
-#undef count_leading_zeros
-#if !defined(count_leading_zeros)
- extern
-#ifdef __STDC__
- const
-#endif
- unsigned char __clz_tab[];
-#define count_leading_zeros(count, x) \
-do { \
- UWtype __xr = (x); \
- UWtype __a; \
- \
- if (W_TYPE_SIZE <= 32) { \
- __a = __xr < ((UWtype) 1 << 2*__BITS4) \
- ? (__xr < ((UWtype) 1 << __BITS4) ? 0 : __BITS4) \
- : (__xr < ((UWtype) 1 << 3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
- } \
- else { \
- for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8) \
- if (((__xr >> __a) & 0xff) != 0) \
- break; \
- } \
- \
- (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
-} while (0)
- /* This version gives a well-defined value for zero. */
-#define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
-#endif
-
-#if !defined(count_trailing_zeros)
-/* Define count_trailing_zeros using count_leading_zeros. The latter might be
- defined in asm, but if it is not, the C version above is good enough. */
-#define count_trailing_zeros(count, x) \
-do { \
- UWtype __ctz_x = (x); \
- UWtype __ctz_c; \
- count_leading_zeros(__ctz_c, __ctz_x & -__ctz_x); \
- (count) = W_TYPE_SIZE - 1 - __ctz_c; \
-} while (0)
-#endif
-
#ifndef UDIV_NEEDS_NORMALIZATION
#define UDIV_NEEDS_NORMALIZATION 0
#endif
diff --git a/lib/mpi/mpi-bit.c b/lib/mpi/mpi-bit.c
index 568724804f29..503537e08436 100644
--- a/lib/mpi/mpi-bit.c
+++ b/lib/mpi/mpi-bit.c
@@ -45,7 +45,7 @@ unsigned mpi_get_nbits(MPI a)
if (a->nlimbs) {
mpi_limb_t alimb = a->d[a->nlimbs - 1];
if (alimb)
- count_leading_zeros(n, alimb);
+ n = count_leading_zeros(alimb);
else
n = BITS_PER_MPI_LIMB;
n = BITS_PER_MPI_LIMB - n + (a->nlimbs - 1) * BITS_PER_MPI_LIMB;
diff --git a/lib/mpi/mpi-cmp.c b/lib/mpi/mpi-cmp.c
new file mode 100644
index 000000000000..1871e7b61ca0
--- /dev/null
+++ b/lib/mpi/mpi-cmp.c
@@ -0,0 +1,70 @@
+/* mpi-cmp.c - MPI functions
+ * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include "mpi-internal.h"
+
+int mpi_cmp_ui(MPI u, unsigned long v)
+{
+ mpi_limb_t limb = v;
+
+ mpi_normalize(u);
+ if (!u->nlimbs && !limb)
+ return 0;
+ if (u->sign)
+ return -1;
+ if (u->nlimbs > 1)
+ return 1;
+
+ if (u->d[0] == limb)
+ return 0;
+ else if (u->d[0] > limb)
+ return 1;
+ else
+ return -1;
+}
+EXPORT_SYMBOL_GPL(mpi_cmp_ui);
+
+int mpi_cmp(MPI u, MPI v)
+{
+ mpi_size_t usize, vsize;
+ int cmp;
+
+ mpi_normalize(u);
+ mpi_normalize(v);
+ usize = u->nlimbs;
+ vsize = v->nlimbs;
+ if (!u->sign && v->sign)
+ return 1;
+ if (u->sign && !v->sign)
+ return -1;
+ if (usize != vsize && !u->sign && !v->sign)
+ return usize - vsize;
+ if (usize != vsize && u->sign && v->sign)
+ return vsize - usize;
+ if (!usize)
+ return 0;
+ cmp = mpihelp_cmp(u->d, v->d, usize);
+ if (!cmp)
+ return 0;
+ if ((cmp < 0 ? 1 : 0) == (u->sign ? 1 : 0))
+ return 1;
+ return -1;
+}
+EXPORT_SYMBOL_GPL(mpi_cmp);
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 67f3e79af914..5464c8744ea9 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -77,7 +77,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
mp = mp_marker = mpi_alloc_limb_space(msize);
if (!mp)
goto enomem;
- count_leading_zeros(mod_shift_cnt, mod->d[msize - 1]);
+ mod_shift_cnt = count_leading_zeros(mod->d[msize - 1]);
if (mod_shift_cnt)
mpihelp_lshift(mp, mod->d, msize, mod_shift_cnt);
else
@@ -169,7 +169,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
i = esize - 1;
e = ep[i];
- count_leading_zeros(c, e);
+ c = count_leading_zeros(e);
e = (e << c) << 1; /* shift the exp bits to the left, lose msb */
c = BITS_PER_MPI_LIMB - 1 - c;
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index f0fa65995800..3962b7f7fe3f 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -18,10 +18,65 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
+#include <linux/bitops.h>
+#include <asm-generic/bitops/count_zeros.h>
#include "mpi-internal.h"
#define MAX_EXTERN_MPI_BITS 16384
+/**
+ * mpi_read_raw_data - Read a raw byte stream as a positive integer
+ * @xbuffer: The data to read
+ * @nbytes: The amount of data to read
+ */
+MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes)
+{
+ const uint8_t *buffer = xbuffer;
+ int i, j;
+ unsigned nbits, nlimbs;
+ mpi_limb_t a;
+ MPI val = NULL;
+
+ while (nbytes > 0 && buffer[0] == 0) {
+ buffer++;
+ nbytes--;
+ }
+
+ nbits = nbytes * 8;
+ if (nbits > MAX_EXTERN_MPI_BITS) {
+ pr_info("MPI: mpi too large (%u bits)\n", nbits);
+ return NULL;
+ }
+ if (nbytes > 0)
+ nbits -= count_leading_zeros(buffer[0]) - (BITS_PER_LONG - 8);
+ else
+ nbits = 0;
+
+ nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
+ val = mpi_alloc(nlimbs);
+ if (!val)
+ return NULL;
+ val->nbits = nbits;
+ val->sign = 0;
+ val->nlimbs = nlimbs;
+
+ if (nbytes > 0) {
+ i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
+ i %= BYTES_PER_MPI_LIMB;
+ for (j = nlimbs; j > 0; j--) {
+ a = 0;
+ for (; i < BYTES_PER_MPI_LIMB; i++) {
+ a <<= 8;
+ a |= *buffer++;
+ }
+ i = 0;
+ val->d[j - 1] = a;
+ }
+ }
+ return val;
+}
+EXPORT_SYMBOL_GPL(mpi_read_raw_data);
+
MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
{
const uint8_t *buffer = xbuffer;
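A hedged sketch tying the new MPI pieces together: mpi_read_raw_data() above
turns a raw big-endian byte string into a positive MPI, and mpi_cmp() from
the new mpi-cmp.c compares two of them with a memcmp-like sign convention.
The buffers and error handling are illustrative.

#include <linux/mpi.h>
#include <linux/errno.h>

static int example_mpi_compare(const u8 *a, size_t alen,
			       const u8 *b, size_t blen)
{
	MPI ma, mb;
	int ret;

	ma = mpi_read_raw_data(a, alen);
	if (!ma)
		return -ENOMEM;
	mb = mpi_read_raw_data(b, blen);
	if (!mb) {
		mpi_free(ma);
		return -ENOMEM;
	}
	ret = mpi_cmp(ma, mb);	/* <0, 0 or >0 */
	mpi_free(mb);
	mpi_free(ma);
	return ret;
}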
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
new file mode 100644
index 000000000000..d8de11f45908
--- /dev/null
+++ b/lib/oid_registry.c
@@ -0,0 +1,170 @@
+/* ASN.1 Object identifier (OID) registry
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/export.h>
+#include <linux/oid_registry.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include "oid_registry_data.c"
+
+/**
+ * look_up_OID - Find an OID registration for the specified data
+ * @data: Binary representation of the OID
+ * @datasize: Size of the binary representation
+ */
+enum OID look_up_OID(const void *data, size_t datasize)
+{
+ const unsigned char *octets = data;
+ enum OID oid;
+ unsigned char xhash;
+ unsigned i, j, k, hash;
+ size_t len;
+
+ /* Hash the OID data */
+ hash = datasize - 1;
+
+ for (i = 0; i < datasize; i++)
+ hash += octets[i] * 33;
+ hash = (hash >> 24) ^ (hash >> 16) ^ (hash >> 8) ^ hash;
+ hash &= 0xff;
+
+ /* Binary search the OID registry. OIDs are stored in ascending order
+ * of hash value then ascending order of size and then in ascending
+ * order of reverse value.
+ */
+ i = 0;
+ k = OID__NR;
+ while (i < k) {
+ j = (i + k) / 2;
+
+ xhash = oid_search_table[j].hash;
+ if (xhash > hash) {
+ k = j;
+ continue;
+ }
+ if (xhash < hash) {
+ i = j + 1;
+ continue;
+ }
+
+ oid = oid_search_table[j].oid;
+ len = oid_index[oid + 1] - oid_index[oid];
+ if (len > datasize) {
+ k = j;
+ continue;
+ }
+ if (len < datasize) {
+ i = j + 1;
+ continue;
+ }
+
+ /* Variation is most likely to be at the tail end of the
+ * OID, so do the comparison in reverse.
+ */
+ while (len > 0) {
+ unsigned char a = oid_data[oid_index[oid] + --len];
+ unsigned char b = octets[len];
+ if (a > b) {
+ k = j;
+ goto next;
+ }
+ if (a < b) {
+ i = j + 1;
+ goto next;
+ }
+ }
+ return oid;
+ next:
+ ;
+ }
+
+ return OID__NR;
+}
+EXPORT_SYMBOL_GPL(look_up_OID);
+
+/*
+ * sprint_oid - Print an Object Identifier into a buffer
+ * @data: The encoded OID to print
+ * @datasize: The size of the encoded OID
+ * @buffer: The buffer to render into
+ * @bufsize: The size of the buffer
+ *
+ * The OID is rendered into the buffer in "a.b.c.d" format and the number of
+ * bytes is returned. -EBADMSG is returned if the data could not be interpreted
+ * and -ENOBUFS if the buffer was too small.
+ */
+int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
+{
+ const unsigned char *v = data, *end = v + datasize;
+ unsigned long num;
+ unsigned char n;
+ size_t ret;
+ int count;
+
+ if (v >= end)
+ return -EBADMSG;
+
+ n = *v++;
+ ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40);
+ buffer += count;
+ bufsize -= count;
+ if (bufsize == 0)
+ return -ENOBUFS;
+
+ while (v < end) {
+ num = 0;
+ n = *v++;
+ if (!(n & 0x80)) {
+ num = n;
+ } else {
+ num = n & 0x7f;
+ do {
+ if (v >= end)
+ return -EBADMSG;
+ n = *v++;
+ num <<= 7;
+ num |= n & 0x7f;
+ } while (n & 0x80);
+ }
+ ret += count = snprintf(buffer, bufsize, ".%lu", num);
+ buffer += count;
+ bufsize -= count;
+ if (bufsize == 0)
+ return -ENOBUFS;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sprint_oid);
+
+/**
+ * sprint_OID - Print an Object Identifier into a buffer
+ * @oid: The OID to print
+ * @buffer: The buffer to render into
+ * @bufsize: The size of the buffer
+ *
+ * The OID is rendered into the buffer in "a.b.c.d" format and the number of
+ * bytes is returned.
+ */
+int sprint_OID(enum OID oid, char *buffer, size_t bufsize)
+{
+ int ret;
+
+ BUG_ON(oid >= OID__NR);
+
+ ret = sprint_oid(oid_data + oid_index[oid],
+ oid_index[oid + 1] - oid_index[oid],
+ buffer, bufsize);
+ BUG_ON(ret == -EBADMSG);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sprint_OID);
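A hedged sketch of the registry API above: look_up_OID() does the
hash-then-binary-search lookup, and sprint_oid() renders the dotted form.
The three octets below encode 2.5.4.3 (X.500 commonName); whether the
registry actually enumerates that OID depends on
include/linux/oid_registry.h, so the lookup result is illustrative.

#include <linux/oid_registry.h>
#include <linux/kernel.h>

static void example_oid_lookup(void)
{
	static const u8 cn[] = { 0x55, 0x04, 0x03 };	/* 2.5.4.3 */
	char buf[32];
	enum OID oid;

	oid = look_up_OID(cn, sizeof(cn));
	if (oid == OID__NR)
		pr_info("OID not in registry\n");
	else if (sprint_oid(cn, sizeof(cn), buf, sizeof(buf)) > 0)
		pr_info("OID %d is %s\n", oid, buf);	/* prints "2.5.4.3" */
}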
diff --git a/lib/parser.c b/lib/parser.c
index c43410084838..52cfa69f73df 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -122,13 +122,14 @@ int match_token(char *s, const match_table_t table, substring_t args[])
*
* Description: Given a &substring_t and a base, attempts to parse the substring
* as a number in that base. On success, sets @result to the integer represented
- * by the string and returns 0. Returns either -ENOMEM or -EINVAL on failure.
+ * by the string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
*/
static int match_number(substring_t *s, int *result, int base)
{
char *endp;
char *buf;
int ret;
+ long val;
size_t len = s->to - s->from;
buf = kmalloc(len + 1, GFP_KERNEL);
@@ -136,10 +137,15 @@ static int match_number(substring_t *s, int *result, int base)
return -ENOMEM;
memcpy(buf, s->from, len);
buf[len] = '\0';
- *result = simple_strtol(buf, &endp, base);
+
ret = 0;
+ val = simple_strtol(buf, &endp, base);
if (endp == buf)
ret = -EINVAL;
+ else if (val < (long)INT_MIN || val > (long)INT_MAX)
+ ret = -ERANGE;
+ else
+ *result = (int) val;
kfree(buf);
return ret;
}
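Why the new clamp matters, as a hedged sketch: on a 64-bit kernel
simple_strtol() parses into a long, so an option value just above INT_MAX
used to be silently truncated on its way into *result; with this patch
match_int() reports -ERANGE instead.

#include <linux/parser.h>
#include <linux/bug.h>

static void example_match_int(void)
{
	char num[] = "4294967296";	/* 2^32: fits in a long, not an int */
	substring_t s = { .from = num, .to = num + sizeof(num) - 1 };
	int val;

	/* Before: returned 0 with val truncated to 0. After: -ERANGE. */
	WARN_ON(match_int(&s, &val) != -ERANGE);
}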
diff --git a/lib/plist.c b/lib/plist.c
index 6ab0e521c48b..1ebc95f7a46f 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -175,7 +175,7 @@ static int __init plist_test(void)
int nr_expect = 0, i, loop;
unsigned int r = local_clock();
- printk(KERN_INFO "start plist test\n");
+ pr_debug("start plist test\n");
plist_head_init(&test_head);
for (i = 0; i < ARRAY_SIZE(test_node); i++)
plist_node_init(test_node + i, 0);
@@ -203,7 +203,7 @@ static int __init plist_test(void)
plist_test_check(nr_expect);
}
- printk(KERN_INFO "end plist test\n");
+ pr_debug("end plist test\n");
return 0;
}
diff --git a/lib/prio_tree.c b/lib/prio_tree.c
deleted file mode 100644
index 8d443af03b4c..000000000000
--- a/lib/prio_tree.c
+++ /dev/null
@@ -1,466 +0,0 @@
-/*
- * lib/prio_tree.c - priority search tree
- *
- * Copyright (C) 2004, Rajesh Venkatasubramanian <vrajesh@umich.edu>
- *
- * This file is released under the GPL v2.
- *
- * Based on the radix priority search tree proposed by Edward M. McCreight
- * SIAM Journal of Computing, vol. 14, no.2, pages 257-276, May 1985
- *
- * 02Feb2004 Initial version
- */
-
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/prio_tree.h>
-
-/*
- * A clever mix of heap and radix trees forms a radix priority search tree (PST)
- * which is useful for storing intervals, e.g, we can consider a vma as a closed
- * interval of file pages [offset_begin, offset_end], and store all vmas that
- * map a file in a PST. Then, using the PST, we can answer a stabbing query,
- * i.e., selecting a set of stored intervals (vmas) that overlap with (map) a
- * given input interval X (a set of consecutive file pages), in "O(log n + m)"
- * time where 'log n' is the height of the PST, and 'm' is the number of stored
- * intervals (vmas) that overlap (map) with the input interval X (the set of
- * consecutive file pages).
- *
- * In our implementation, we store closed intervals of the form [radix_index,
- * heap_index]. We assume that always radix_index <= heap_index. McCreight's PST
- * is designed for storing intervals with unique radix indices, i.e., each
- * interval have different radix_index. However, this limitation can be easily
- * overcome by using the size, i.e., heap_index - radix_index, as part of the
- * index, so we index the tree using [(radix_index,size), heap_index].
- *
- * When the above-mentioned indexing scheme is used, theoretically, in a 32 bit
- * machine, the maximum height of a PST can be 64. We can use a balanced version
- * of the priority search tree to optimize the tree height, but the balanced
- * tree proposed by McCreight is too complex and memory-hungry for our purpose.
- */
-
-/*
- * The following macros are used for implementing prio_tree for i_mmap
- */
-
-#define RADIX_INDEX(vma) ((vma)->vm_pgoff)
-#define VMA_SIZE(vma) (((vma)->vm_end - (vma)->vm_start) >> PAGE_SHIFT)
-/* avoid overflow */
-#define HEAP_INDEX(vma) ((vma)->vm_pgoff + (VMA_SIZE(vma) - 1))
-
-
-static void get_index(const struct prio_tree_root *root,
- const struct prio_tree_node *node,
- unsigned long *radix, unsigned long *heap)
-{
- if (root->raw) {
- struct vm_area_struct *vma = prio_tree_entry(
- node, struct vm_area_struct, shared.prio_tree_node);
-
- *radix = RADIX_INDEX(vma);
- *heap = HEAP_INDEX(vma);
- }
- else {
- *radix = node->start;
- *heap = node->last;
- }
-}
-
-static unsigned long index_bits_to_maxindex[BITS_PER_LONG];
-
-void __init prio_tree_init(void)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(index_bits_to_maxindex) - 1; i++)
- index_bits_to_maxindex[i] = (1UL << (i + 1)) - 1;
- index_bits_to_maxindex[ARRAY_SIZE(index_bits_to_maxindex) - 1] = ~0UL;
-}
-
-/*
- * Maximum heap_index that can be stored in a PST with index_bits bits
- */
-static inline unsigned long prio_tree_maxindex(unsigned int bits)
-{
- return index_bits_to_maxindex[bits - 1];
-}
-
-static void prio_set_parent(struct prio_tree_node *parent,
- struct prio_tree_node *child, bool left)
-{
- if (left)
- parent->left = child;
- else
- parent->right = child;
-
- child->parent = parent;
-}
-
-/*
- * Extend a priority search tree so that it can store a node with heap_index
- * max_heap_index. In the worst case, this algorithm takes O((log n)^2).
- * However, this function is used rarely and the common case performance is
- * not bad.
- */
-static struct prio_tree_node *prio_tree_expand(struct prio_tree_root *root,
- struct prio_tree_node *node, unsigned long max_heap_index)
-{
- struct prio_tree_node *prev;
-
- if (max_heap_index > prio_tree_maxindex(root->index_bits))
- root->index_bits++;
-
- prev = node;
- INIT_PRIO_TREE_NODE(node);
-
- while (max_heap_index > prio_tree_maxindex(root->index_bits)) {
- struct prio_tree_node *tmp = root->prio_tree_node;
-
- root->index_bits++;
-
- if (prio_tree_empty(root))
- continue;
-
- prio_tree_remove(root, root->prio_tree_node);
- INIT_PRIO_TREE_NODE(tmp);
-
- prio_set_parent(prev, tmp, true);
- prev = tmp;
- }
-
- if (!prio_tree_empty(root))
- prio_set_parent(prev, root->prio_tree_node, true);
-
- root->prio_tree_node = node;
- return node;
-}
-
-/*
- * Replace a prio_tree_node with a new node and return the old node
- */
-struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root,
- struct prio_tree_node *old, struct prio_tree_node *node)
-{
- INIT_PRIO_TREE_NODE(node);
-
- if (prio_tree_root(old)) {
- BUG_ON(root->prio_tree_node != old);
- /*
- * We can reduce root->index_bits here. However, it is complex
- * and does not help much to improve performance (IMO).
- */
- root->prio_tree_node = node;
- } else
- prio_set_parent(old->parent, node, old->parent->left == old);
-
- if (!prio_tree_left_empty(old))
- prio_set_parent(node, old->left, true);
-
- if (!prio_tree_right_empty(old))
- prio_set_parent(node, old->right, false);
-
- return old;
-}
-
-/*
- * Insert a prio_tree_node @node into a radix priority search tree @root. The
- * algorithm typically takes O(log n) time where 'log n' is the number of bits
- * required to represent the maximum heap_index. In the worst case, the algo
- * can take O((log n)^2) - check prio_tree_expand.
- *
- * If a prior node with same radix_index and heap_index is already found in
- * the tree, then returns the address of the prior node. Otherwise, inserts
- * @node into the tree and returns @node.
- */
-struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root,
- struct prio_tree_node *node)
-{
- struct prio_tree_node *cur, *res = node;
- unsigned long radix_index, heap_index;
- unsigned long r_index, h_index, index, mask;
- int size_flag = 0;
-
- get_index(root, node, &radix_index, &heap_index);
-
- if (prio_tree_empty(root) ||
- heap_index > prio_tree_maxindex(root->index_bits))
- return prio_tree_expand(root, node, heap_index);
-
- cur = root->prio_tree_node;
- mask = 1UL << (root->index_bits - 1);
-
- while (mask) {
- get_index(root, cur, &r_index, &h_index);
-
- if (r_index == radix_index && h_index == heap_index)
- return cur;
-
- if (h_index < heap_index ||
- (h_index == heap_index && r_index > radix_index)) {
- struct prio_tree_node *tmp = node;
- node = prio_tree_replace(root, cur, node);
- cur = tmp;
- /* swap indices */
- index = r_index;
- r_index = radix_index;
- radix_index = index;
- index = h_index;
- h_index = heap_index;
- heap_index = index;
- }
-
- if (size_flag)
- index = heap_index - radix_index;
- else
- index = radix_index;
-
- if (index & mask) {
- if (prio_tree_right_empty(cur)) {
- INIT_PRIO_TREE_NODE(node);
- prio_set_parent(cur, node, false);
- return res;
- } else
- cur = cur->right;
- } else {
- if (prio_tree_left_empty(cur)) {
- INIT_PRIO_TREE_NODE(node);
- prio_set_parent(cur, node, true);
- return res;
- } else
- cur = cur->left;
- }
-
- mask >>= 1;
-
- if (!mask) {
- mask = 1UL << (BITS_PER_LONG - 1);
- size_flag = 1;
- }
- }
- /* Should not reach here */
- BUG();
- return NULL;
-}
-
-/*
- * Remove a prio_tree_node @node from a radix priority search tree @root. The
- * algorithm takes O(log n) time where 'log n' is the number of bits required
- * to represent the maximum heap_index.
- */
-void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node)
-{
- struct prio_tree_node *cur;
- unsigned long r_index, h_index_right, h_index_left;
-
- cur = node;
-
- while (!prio_tree_left_empty(cur) || !prio_tree_right_empty(cur)) {
- if (!prio_tree_left_empty(cur))
- get_index(root, cur->left, &r_index, &h_index_left);
- else {
- cur = cur->right;
- continue;
- }
-
- if (!prio_tree_right_empty(cur))
- get_index(root, cur->right, &r_index, &h_index_right);
- else {
- cur = cur->left;
- continue;
- }
-
- /* both h_index_left and h_index_right cannot be 0 */
- if (h_index_left >= h_index_right)
- cur = cur->left;
- else
- cur = cur->right;
- }
-
- if (prio_tree_root(cur)) {
- BUG_ON(root->prio_tree_node != cur);
- __INIT_PRIO_TREE_ROOT(root, root->raw);
- return;
- }
-
- if (cur->parent->right == cur)
- cur->parent->right = cur->parent;
- else
- cur->parent->left = cur->parent;
-
- while (cur != node)
- cur = prio_tree_replace(root, cur->parent, cur);
-}
-
-static void iter_walk_down(struct prio_tree_iter *iter)
-{
- iter->mask >>= 1;
- if (iter->mask) {
- if (iter->size_level)
- iter->size_level++;
- return;
- }
-
- if (iter->size_level) {
- BUG_ON(!prio_tree_left_empty(iter->cur));
- BUG_ON(!prio_tree_right_empty(iter->cur));
- iter->size_level++;
- iter->mask = ULONG_MAX;
- } else {
- iter->size_level = 1;
- iter->mask = 1UL << (BITS_PER_LONG - 1);
- }
-}
-
-static void iter_walk_up(struct prio_tree_iter *iter)
-{
- if (iter->mask == ULONG_MAX)
- iter->mask = 1UL;
- else if (iter->size_level == 1)
- iter->mask = 1UL;
- else
- iter->mask <<= 1;
- if (iter->size_level)
- iter->size_level--;
- if (!iter->size_level && (iter->value & iter->mask))
- iter->value ^= iter->mask;
-}
-
-/*
- * Following functions help to enumerate all prio_tree_nodes in the tree that
- * overlap with the input interval X [radix_index, heap_index]. The enumeration
- * takes O(log n + m) time where 'log n' is the height of the tree (which is
- * proportional to # of bits required to represent the maximum heap_index) and
- * 'm' is the number of prio_tree_nodes that overlap the interval X.
- */
-
-static struct prio_tree_node *prio_tree_left(struct prio_tree_iter *iter,
- unsigned long *r_index, unsigned long *h_index)
-{
- if (prio_tree_left_empty(iter->cur))
- return NULL;
-
- get_index(iter->root, iter->cur->left, r_index, h_index);
-
- if (iter->r_index <= *h_index) {
- iter->cur = iter->cur->left;
- iter_walk_down(iter);
- return iter->cur;
- }
-
- return NULL;
-}
-
-static struct prio_tree_node *prio_tree_right(struct prio_tree_iter *iter,
- unsigned long *r_index, unsigned long *h_index)
-{
- unsigned long value;
-
- if (prio_tree_right_empty(iter->cur))
- return NULL;
-
- if (iter->size_level)
- value = iter->value;
- else
- value = iter->value | iter->mask;
-
- if (iter->h_index < value)
- return NULL;
-
- get_index(iter->root, iter->cur->right, r_index, h_index);
-
- if (iter->r_index <= *h_index) {
- iter->cur = iter->cur->right;
- iter_walk_down(iter);
- return iter->cur;
- }
-
- return NULL;
-}
-
-static struct prio_tree_node *prio_tree_parent(struct prio_tree_iter *iter)
-{
- iter->cur = iter->cur->parent;
- iter_walk_up(iter);
- return iter->cur;
-}
-
-static inline int overlap(struct prio_tree_iter *iter,
- unsigned long r_index, unsigned long h_index)
-{
- return iter->h_index >= r_index && iter->r_index <= h_index;
-}
-
-/*
- * prio_tree_first:
- *
- * Get the first prio_tree_node that overlaps with the interval [radix_index,
- * heap_index]. Note that always radix_index <= heap_index. We do a pre-order
- * traversal of the tree.
- */
-static struct prio_tree_node *prio_tree_first(struct prio_tree_iter *iter)
-{
- struct prio_tree_root *root;
- unsigned long r_index, h_index;
-
- INIT_PRIO_TREE_ITER(iter);
-
- root = iter->root;
- if (prio_tree_empty(root))
- return NULL;
-
- get_index(root, root->prio_tree_node, &r_index, &h_index);
-
- if (iter->r_index > h_index)
- return NULL;
-
- iter->mask = 1UL << (root->index_bits - 1);
- iter->cur = root->prio_tree_node;
-
- while (1) {
- if (overlap(iter, r_index, h_index))
- return iter->cur;
-
- if (prio_tree_left(iter, &r_index, &h_index))
- continue;
-
- if (prio_tree_right(iter, &r_index, &h_index))
- continue;
-
- break;
- }
- return NULL;
-}
-
-/*
- * prio_tree_next:
- *
- * Get the next prio_tree_node that overlaps with the input interval in iter
- */
-struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter)
-{
- unsigned long r_index, h_index;
-
- if (iter->cur == NULL)
- return prio_tree_first(iter);
-
-repeat:
- while (prio_tree_left(iter, &r_index, &h_index))
- if (overlap(iter, r_index, h_index))
- return iter->cur;
-
- while (!prio_tree_right(iter, &r_index, &h_index)) {
- while (!prio_tree_root(iter->cur) &&
- iter->cur->parent->right == iter->cur)
- prio_tree_parent(iter);
-
- if (prio_tree_root(iter->cur))
- return NULL;
-
- prio_tree_parent(iter);
- }
-
- if (overlap(iter, r_index, h_index))
- return iter->cur;
-
- goto repeat;
-}
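
The stabbing query in the file removed above reduces every pruning decision to one closed-interval test (the deleted overlap() helper): [r1,h1] and [r2,h2] intersect iff each interval starts no later than the other ends. A freestanding sketch of just that predicate:

#include <assert.h>
#include <stdbool.h>

static bool intervals_overlap(unsigned long r1, unsigned long h1,
			      unsigned long r2, unsigned long h2)
{
	/* closed intervals: shared endpoints count as overlap */
	return h1 >= r2 && r1 <= h2;
}

int main(void)
{
	assert(intervals_overlap(0, 5, 5, 9));
	assert(!intervals_overlap(0, 4, 5, 9));
	return 0;
}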
diff --git a/lib/rbtree.c b/lib/rbtree.c
index d4175565dc2c..4f56a11d67fa 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -2,7 +2,8 @@
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
(C) 2002 David Woodhouse <dwmw2@infradead.org>
-
+ (C) 2012 Michel Lespinasse <walken@google.com>
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
@@ -20,339 +21,382 @@
linux/lib/rbtree.c
*/
-#include <linux/rbtree.h>
+#include <linux/rbtree_augmented.h>
#include <linux/export.h>
-static void __rb_rotate_left(struct rb_node *node, struct rb_root *root)
-{
- struct rb_node *right = node->rb_right;
- struct rb_node *parent = rb_parent(node);
-
- if ((node->rb_right = right->rb_left))
- rb_set_parent(right->rb_left, node);
- right->rb_left = node;
-
- rb_set_parent(right, parent);
+/*
+ * red-black tree properties: http://en.wikipedia.org/wiki/Rbtree
+ *
+ * 1) A node is either red or black
+ * 2) The root is black
+ * 3) All leaves (NULL) are black
+ * 4) Both children of every red node are black
+ * 5) Every simple path from root to leaves contains the same number
+ * of black nodes.
+ *
+ * 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
+ * consecutive red nodes in a path and every red node is therefore followed by
+ * a black. So if B is the number of black nodes on every simple path (as per
+ * 5), then the longest possible path due to 4 is 2B.
+ *
+ * We shall indicate color with case, where black nodes are uppercase and red
+ * nodes will be lowercase. Unknown color nodes shall be drawn as red within
+ * parentheses and have some accompanying text comment.
+ */
- if (parent)
- {
- if (node == parent->rb_left)
- parent->rb_left = right;
- else
- parent->rb_right = right;
- }
- else
- root->rb_node = right;
- rb_set_parent(node, right);
+static inline void rb_set_black(struct rb_node *rb)
+{
+ rb->__rb_parent_color |= RB_BLACK;
}
-static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
+static inline struct rb_node *rb_red_parent(struct rb_node *red)
{
- struct rb_node *left = node->rb_left;
- struct rb_node *parent = rb_parent(node);
-
- if ((node->rb_left = left->rb_right))
- rb_set_parent(left->rb_right, node);
- left->rb_right = node;
-
- rb_set_parent(left, parent);
+ return (struct rb_node *)red->__rb_parent_color;
+}
- if (parent)
- {
- if (node == parent->rb_right)
- parent->rb_right = left;
- else
- parent->rb_left = left;
- }
- else
- root->rb_node = left;
- rb_set_parent(node, left);
+/*
+ * Helper function for rotations:
+ * - old's parent and color get assigned to new
+ * - old gets assigned new as a parent and 'color' as a color.
+ */
+static inline void
+__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
+ struct rb_root *root, int color)
+{
+ struct rb_node *parent = rb_parent(old);
+ new->__rb_parent_color = old->__rb_parent_color;
+ rb_set_parent_color(old, new, color);
+ __rb_change_child(old, new, parent, root);
}
-void rb_insert_color(struct rb_node *node, struct rb_root *root)
+static __always_inline void
+__rb_insert(struct rb_node *node, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
- struct rb_node *parent, *gparent;
-
- while ((parent = rb_parent(node)) && rb_is_red(parent))
- {
- gparent = rb_parent(parent);
-
- if (parent == gparent->rb_left)
- {
- {
- register struct rb_node *uncle = gparent->rb_right;
- if (uncle && rb_is_red(uncle))
- {
- rb_set_black(uncle);
- rb_set_black(parent);
- rb_set_red(gparent);
- node = gparent;
- continue;
- }
+ struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
+
+ while (true) {
+ /*
+ * Loop invariant: node is red
+ *
+ * If there is a black parent, we are done.
+ * Otherwise, take some corrective action as we don't
+ * want a red root or two consecutive red nodes.
+ */
+ if (!parent) {
+ rb_set_parent_color(node, NULL, RB_BLACK);
+ break;
+ } else if (rb_is_black(parent))
+ break;
+
+ gparent = rb_red_parent(parent);
+
+ tmp = gparent->rb_right;
+ if (parent != tmp) { /* parent == gparent->rb_left */
+ if (tmp && rb_is_red(tmp)) {
+ /*
+ * Case 1 - color flips
+ *
+ * G g
+ * / \ / \
+ * p u --> P U
+ * / /
+ * n N
+ *
+ * However, since g's parent might be red, and
+ * 4) does not allow this, we need to recurse
+ * at g.
+ */
+ rb_set_parent_color(tmp, gparent, RB_BLACK);
+ rb_set_parent_color(parent, gparent, RB_BLACK);
+ node = gparent;
+ parent = rb_parent(node);
+ rb_set_parent_color(node, parent, RB_RED);
+ continue;
}
- if (parent->rb_right == node)
- {
- register struct rb_node *tmp;
- __rb_rotate_left(parent, root);
- tmp = parent;
+ tmp = parent->rb_right;
+ if (node == tmp) {
+ /*
+ * Case 2 - left rotate at parent
+ *
+ * G G
+ * / \ / \
+ * p U --> n U
+ * \ /
+ * n p
+ *
+ * This still leaves us in violation of 4), the
+ * continuation into Case 3 will fix that.
+ */
+ parent->rb_right = tmp = node->rb_left;
+ node->rb_left = parent;
+ if (tmp)
+ rb_set_parent_color(tmp, parent,
+ RB_BLACK);
+ rb_set_parent_color(parent, node, RB_RED);
+ augment_rotate(parent, node);
parent = node;
- node = tmp;
+ tmp = node->rb_right;
}
- rb_set_black(parent);
- rb_set_red(gparent);
- __rb_rotate_right(gparent, root);
+ /*
+ * Case 3 - right rotate at gparent
+ *
+ * G P
+ * / \ / \
+ * p U --> n g
+ * / \
+ * n U
+ */
+ gparent->rb_left = tmp; /* == parent->rb_right */
+ parent->rb_right = gparent;
+ if (tmp)
+ rb_set_parent_color(tmp, gparent, RB_BLACK);
+ __rb_rotate_set_parents(gparent, parent, root, RB_RED);
+ augment_rotate(gparent, parent);
+ break;
} else {
- {
- register struct rb_node *uncle = gparent->rb_left;
- if (uncle && rb_is_red(uncle))
- {
- rb_set_black(uncle);
- rb_set_black(parent);
- rb_set_red(gparent);
- node = gparent;
- continue;
- }
+ tmp = gparent->rb_left;
+ if (tmp && rb_is_red(tmp)) {
+ /* Case 1 - color flips */
+ rb_set_parent_color(tmp, gparent, RB_BLACK);
+ rb_set_parent_color(parent, gparent, RB_BLACK);
+ node = gparent;
+ parent = rb_parent(node);
+ rb_set_parent_color(node, parent, RB_RED);
+ continue;
}
- if (parent->rb_left == node)
- {
- register struct rb_node *tmp;
- __rb_rotate_right(parent, root);
- tmp = parent;
+ tmp = parent->rb_left;
+ if (node == tmp) {
+ /* Case 2 - right rotate at parent */
+ parent->rb_left = tmp = node->rb_right;
+ node->rb_right = parent;
+ if (tmp)
+ rb_set_parent_color(tmp, parent,
+ RB_BLACK);
+ rb_set_parent_color(parent, node, RB_RED);
+ augment_rotate(parent, node);
parent = node;
- node = tmp;
+ tmp = node->rb_left;
}
- rb_set_black(parent);
- rb_set_red(gparent);
- __rb_rotate_left(gparent, root);
+ /* Case 3 - left rotate at gparent */
+ gparent->rb_right = tmp; /* == parent->rb_left */
+ parent->rb_left = gparent;
+ if (tmp)
+ rb_set_parent_color(tmp, gparent, RB_BLACK);
+ __rb_rotate_set_parents(gparent, parent, root, RB_RED);
+ augment_rotate(gparent, parent);
+ break;
}
}
-
- rb_set_black(root->rb_node);
}
-EXPORT_SYMBOL(rb_insert_color);
-static void __rb_erase_color(struct rb_node *node, struct rb_node *parent,
- struct rb_root *root)
+__always_inline void
+__rb_erase_color(struct rb_node *parent, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
- struct rb_node *other;
-
- while ((!node || rb_is_black(node)) && node != root->rb_node)
- {
- if (parent->rb_left == node)
- {
- other = parent->rb_right;
- if (rb_is_red(other))
- {
- rb_set_black(other);
- rb_set_red(parent);
- __rb_rotate_left(parent, root);
- other = parent->rb_right;
+ struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
+
+ while (true) {
+ /*
+ * Loop invariants:
+ * - node is black (or NULL on first iteration)
+ * - node is not the root (parent is not NULL)
+ * - All leaf paths going through parent and node have a
+ * black node count that is 1 lower than other leaf paths.
+ */
+ sibling = parent->rb_right;
+ if (node != sibling) { /* node == parent->rb_left */
+ if (rb_is_red(sibling)) {
+ /*
+ * Case 1 - left rotate at parent
+ *
+ * P S
+ * / \ / \
+ * N s --> p Sr
+ * / \ / \
+ * Sl Sr N Sl
+ */
+ parent->rb_right = tmp1 = sibling->rb_left;
+ sibling->rb_left = parent;
+ rb_set_parent_color(tmp1, parent, RB_BLACK);
+ __rb_rotate_set_parents(parent, sibling, root,
+ RB_RED);
+ augment_rotate(parent, sibling);
+ sibling = tmp1;
}
- if ((!other->rb_left || rb_is_black(other->rb_left)) &&
- (!other->rb_right || rb_is_black(other->rb_right)))
- {
- rb_set_red(other);
- node = parent;
- parent = rb_parent(node);
- }
- else
- {
- if (!other->rb_right || rb_is_black(other->rb_right))
- {
- rb_set_black(other->rb_left);
- rb_set_red(other);
- __rb_rotate_right(other, root);
- other = parent->rb_right;
+ tmp1 = sibling->rb_right;
+ if (!tmp1 || rb_is_black(tmp1)) {
+ tmp2 = sibling->rb_left;
+ if (!tmp2 || rb_is_black(tmp2)) {
+ /*
+ * Case 2 - sibling color flip
+ * (p could be either color here)
+ *
+ * (p) (p)
+ * / \ / \
+ * N S --> N s
+ * / \ / \
+ * Sl Sr Sl Sr
+ *
+ * This leaves us violating 5) which
+ * can be fixed by flipping p to black
+ * if it was red, or by recursing at p.
+ * p is red when coming from Case 1.
+ */
+ rb_set_parent_color(sibling, parent,
+ RB_RED);
+ if (rb_is_red(parent))
+ rb_set_black(parent);
+ else {
+ node = parent;
+ parent = rb_parent(node);
+ if (parent)
+ continue;
+ }
+ break;
}
- rb_set_color(other, rb_color(parent));
- rb_set_black(parent);
- rb_set_black(other->rb_right);
- __rb_rotate_left(parent, root);
- node = root->rb_node;
- break;
- }
- }
- else
- {
- other = parent->rb_left;
- if (rb_is_red(other))
- {
- rb_set_black(other);
- rb_set_red(parent);
- __rb_rotate_right(parent, root);
- other = parent->rb_left;
+ /*
+ * Case 3 - right rotate at sibling
+ * (p could be either color here)
+ *
+ * (p) (p)
+ * / \ / \
+ * N S --> N Sl
+ * / \ \
+ * sl Sr s
+ * \
+ * Sr
+ */
+ sibling->rb_left = tmp1 = tmp2->rb_right;
+ tmp2->rb_right = sibling;
+ parent->rb_right = tmp2;
+ if (tmp1)
+ rb_set_parent_color(tmp1, sibling,
+ RB_BLACK);
+ augment_rotate(sibling, tmp2);
+ tmp1 = sibling;
+ sibling = tmp2;
}
- if ((!other->rb_left || rb_is_black(other->rb_left)) &&
- (!other->rb_right || rb_is_black(other->rb_right)))
- {
- rb_set_red(other);
- node = parent;
- parent = rb_parent(node);
+ /*
+ * Case 4 - left rotate at parent + color flips
+ * (p and sl could be either color here.
+ * After rotation, p becomes black, s acquires
+ * p's color, and sl keeps its color)
+ *
+ * (p) (s)
+ * / \ / \
+ * N S --> P Sr
+ * / \ / \
+ * (sl) sr N (sl)
+ */
+ parent->rb_right = tmp2 = sibling->rb_left;
+ sibling->rb_left = parent;
+ rb_set_parent_color(tmp1, sibling, RB_BLACK);
+ if (tmp2)
+ rb_set_parent(tmp2, parent);
+ __rb_rotate_set_parents(parent, sibling, root,
+ RB_BLACK);
+ augment_rotate(parent, sibling);
+ break;
+ } else {
+ sibling = parent->rb_left;
+ if (rb_is_red(sibling)) {
+ /* Case 1 - right rotate at parent */
+ parent->rb_left = tmp1 = sibling->rb_right;
+ sibling->rb_right = parent;
+ rb_set_parent_color(tmp1, parent, RB_BLACK);
+ __rb_rotate_set_parents(parent, sibling, root,
+ RB_RED);
+ augment_rotate(parent, sibling);
+ sibling = tmp1;
}
- else
- {
- if (!other->rb_left || rb_is_black(other->rb_left))
- {
- rb_set_black(other->rb_right);
- rb_set_red(other);
- __rb_rotate_left(other, root);
- other = parent->rb_left;
+ tmp1 = sibling->rb_left;
+ if (!tmp1 || rb_is_black(tmp1)) {
+ tmp2 = sibling->rb_right;
+ if (!tmp2 || rb_is_black(tmp2)) {
+ /* Case 2 - sibling color flip */
+ rb_set_parent_color(sibling, parent,
+ RB_RED);
+ if (rb_is_red(parent))
+ rb_set_black(parent);
+ else {
+ node = parent;
+ parent = rb_parent(node);
+ if (parent)
+ continue;
+ }
+ break;
}
- rb_set_color(other, rb_color(parent));
- rb_set_black(parent);
- rb_set_black(other->rb_left);
- __rb_rotate_right(parent, root);
- node = root->rb_node;
- break;
+ /* Case 3 - right rotate at sibling */
+ sibling->rb_right = tmp1 = tmp2->rb_left;
+ tmp2->rb_left = sibling;
+ parent->rb_left = tmp2;
+ if (tmp1)
+ rb_set_parent_color(tmp1, sibling,
+ RB_BLACK);
+ augment_rotate(sibling, tmp2);
+ tmp1 = sibling;
+ sibling = tmp2;
}
+ /* Case 4 - left rotate at parent + color flips */
+ parent->rb_left = tmp2 = sibling->rb_right;
+ sibling->rb_right = parent;
+ rb_set_parent_color(tmp1, sibling, RB_BLACK);
+ if (tmp2)
+ rb_set_parent(tmp2, parent);
+ __rb_rotate_set_parents(parent, sibling, root,
+ RB_BLACK);
+ augment_rotate(parent, sibling);
+ break;
}
}
- if (node)
- rb_set_black(node);
}
+EXPORT_SYMBOL(__rb_erase_color);
-void rb_erase(struct rb_node *node, struct rb_root *root)
-{
- struct rb_node *child, *parent;
- int color;
-
- if (!node->rb_left)
- child = node->rb_right;
- else if (!node->rb_right)
- child = node->rb_left;
- else
- {
- struct rb_node *old = node, *left;
-
- node = node->rb_right;
- while ((left = node->rb_left) != NULL)
- node = left;
-
- if (rb_parent(old)) {
- if (rb_parent(old)->rb_left == old)
- rb_parent(old)->rb_left = node;
- else
- rb_parent(old)->rb_right = node;
- } else
- root->rb_node = node;
-
- child = node->rb_right;
- parent = rb_parent(node);
- color = rb_color(node);
-
- if (parent == old) {
- parent = node;
- } else {
- if (child)
- rb_set_parent(child, parent);
- parent->rb_left = child;
-
- node->rb_right = old->rb_right;
- rb_set_parent(old->rb_right, node);
- }
-
- node->rb_parent_color = old->rb_parent_color;
- node->rb_left = old->rb_left;
- rb_set_parent(old->rb_left, node);
+/*
+ * Non-augmented rbtree manipulation functions.
+ *
+ * We use dummy augmented callbacks here, and have the compiler optimize them
+ * out of the rb_insert_color() and rb_erase() function definitions.
+ */
- goto color;
- }
+static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
+static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
+static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
- parent = rb_parent(node);
- color = rb_color(node);
-
- if (child)
- rb_set_parent(child, parent);
- if (parent)
- {
- if (parent->rb_left == node)
- parent->rb_left = child;
- else
- parent->rb_right = child;
- }
- else
- root->rb_node = child;
+static const struct rb_augment_callbacks dummy_callbacks = {
+ dummy_propagate, dummy_copy, dummy_rotate
+};
- color:
- if (color == RB_BLACK)
- __rb_erase_color(child, parent, root);
-}
-EXPORT_SYMBOL(rb_erase);
-
-static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data)
+void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
- struct rb_node *parent;
-
-up:
- func(node, data);
- parent = rb_parent(node);
- if (!parent)
- return;
-
- if (node == parent->rb_left && parent->rb_right)
- func(parent->rb_right, data);
- else if (parent->rb_left)
- func(parent->rb_left, data);
-
- node = parent;
- goto up;
+ __rb_insert(node, root, dummy_rotate);
}
+EXPORT_SYMBOL(rb_insert_color);
-/*
- * after inserting @node into the tree, update the tree to account for
- * both the new entry and any damage done by rebalance
- */
-void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)
+void rb_erase(struct rb_node *node, struct rb_root *root)
{
- if (node->rb_left)
- node = node->rb_left;
- else if (node->rb_right)
- node = node->rb_right;
-
- rb_augment_path(node, func, data);
+ rb_erase_augmented(node, root, &dummy_callbacks);
}
-EXPORT_SYMBOL(rb_augment_insert);
+EXPORT_SYMBOL(rb_erase);
/*
- * before removing the node, find the deepest node on the rebalance path
- * that will still be there after @node gets removed
+ * Augmented rbtree manipulation functions.
+ *
+ * This instantiates the same __always_inline functions as in the non-augmented
+ * case, but this time with user-defined callbacks.
*/
-struct rb_node *rb_augment_erase_begin(struct rb_node *node)
-{
- struct rb_node *deepest;
-
- if (!node->rb_right && !node->rb_left)
- deepest = rb_parent(node);
- else if (!node->rb_right)
- deepest = node->rb_left;
- else if (!node->rb_left)
- deepest = node->rb_right;
- else {
- deepest = rb_next(node);
- if (deepest->rb_right)
- deepest = deepest->rb_right;
- else if (rb_parent(deepest) != node)
- deepest = rb_parent(deepest);
- }
-
- return deepest;
-}
-EXPORT_SYMBOL(rb_augment_erase_begin);
-/*
- * after removal, update the tree to account for the removed entry
- * and any rebalance damage.
- */
-void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
+void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
- if (node)
- rb_augment_path(node, func, data);
+ __rb_insert(node, root, augment_rotate);
}
-EXPORT_SYMBOL(rb_augment_erase_end);
+EXPORT_SYMBOL(__rb_insert_augmented);
/*
* This function returns the first node (in sort order) of the tree.
@@ -387,11 +431,13 @@ struct rb_node *rb_next(const struct rb_node *node)
{
struct rb_node *parent;
- if (rb_parent(node) == node)
+ if (RB_EMPTY_NODE(node))
return NULL;
- /* If we have a right-hand child, go down and then left as far
- as we can. */
+ /*
+ * If we have a right-hand child, go down and then left as far
+ * as we can.
+ */
if (node->rb_right) {
node = node->rb_right;
while (node->rb_left)
@@ -399,12 +445,13 @@ struct rb_node *rb_next(const struct rb_node *node)
return (struct rb_node *)node;
}
- /* No right-hand children. Everything down and left is
- smaller than us, so any 'next' node must be in the general
- direction of our parent. Go up the tree; any time the
- ancestor is a right-hand child of its parent, keep going
- up. First time it's a left-hand child of its parent, said
- parent is our 'next' node. */
+ /*
+ * No right-hand children. Everything down and left is smaller than us,
+ * so any 'next' node must be in the general direction of our parent.
+ * Go up the tree; any time the ancestor is a right-hand child of its
+ * parent, keep going up. First time it's a left-hand child of its
+ * parent, said parent is our 'next' node.
+ */
while ((parent = rb_parent(node)) && node == parent->rb_right)
node = parent;
@@ -416,11 +463,13 @@ struct rb_node *rb_prev(const struct rb_node *node)
{
struct rb_node *parent;
- if (rb_parent(node) == node)
+ if (RB_EMPTY_NODE(node))
return NULL;
- /* If we have a left-hand child, go down and then right as far
- as we can. */
+ /*
+ * If we have a left-hand child, go down and then right as far
+ * as we can.
+ */
if (node->rb_left) {
node = node->rb_left;
while (node->rb_right)
@@ -428,8 +477,10 @@ struct rb_node *rb_prev(const struct rb_node *node)
return (struct rb_node *)node;
}
- /* No left-hand children. Go up till we find an ancestor which
- is a right-hand child of its parent */
+ /*
+ * No left-hand children. Go up till we find an ancestor which
+ * is a right-hand child of its parent.
+ */
while ((parent = rb_parent(node)) && node == parent->rb_left)
node = parent;
@@ -443,14 +494,7 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_node *parent = rb_parent(victim);
/* Set the surrounding nodes to point to the replacement */
- if (parent) {
- if (victim == parent->rb_left)
- parent->rb_left = new;
- else
- parent->rb_right = new;
- } else {
- root->rb_node = new;
- }
+ __rb_change_child(victim, new, parent, root);
if (victim->rb_left)
rb_set_parent(victim->rb_left, new);
if (victim->rb_right)
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
new file mode 100644
index 000000000000..268b23951fec
--- /dev/null
+++ b/lib/rbtree_test.c
@@ -0,0 +1,234 @@
+#include <linux/module.h>
+#include <linux/rbtree_augmented.h>
+#include <linux/random.h>
+#include <asm/timex.h>
+
+#define NODES 100
+#define PERF_LOOPS 100000
+#define CHECK_LOOPS 100
+
+struct test_node {
+ struct rb_node rb;
+ u32 key;
+
+ /* following fields used for testing augmented rbtree functionality */
+ u32 val;
+ u32 augmented;
+};
+
+static struct rb_root root = RB_ROOT;
+static struct test_node nodes[NODES];
+
+static struct rnd_state rnd;
+
+static void insert(struct test_node *node, struct rb_root *root)
+{
+ struct rb_node **new = &root->rb_node, *parent = NULL;
+ u32 key = node->key;
+
+ while (*new) {
+ parent = *new;
+ if (key < rb_entry(parent, struct test_node, rb)->key)
+ new = &parent->rb_left;
+ else
+ new = &parent->rb_right;
+ }
+
+ rb_link_node(&node->rb, parent, new);
+ rb_insert_color(&node->rb, root);
+}
+
+static inline void erase(struct test_node *node, struct rb_root *root)
+{
+ rb_erase(&node->rb, root);
+}
+
+static inline u32 augment_recompute(struct test_node *node)
+{
+ u32 max = node->val, child_augmented;
+ if (node->rb.rb_left) {
+ child_augmented = rb_entry(node->rb.rb_left, struct test_node,
+ rb)->augmented;
+ if (max < child_augmented)
+ max = child_augmented;
+ }
+ if (node->rb.rb_right) {
+ child_augmented = rb_entry(node->rb.rb_right, struct test_node,
+ rb)->augmented;
+ if (max < child_augmented)
+ max = child_augmented;
+ }
+ return max;
+}
+
+RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb,
+ u32, augmented, augment_recompute)
+
+static void insert_augmented(struct test_node *node, struct rb_root *root)
+{
+ struct rb_node **new = &root->rb_node, *rb_parent = NULL;
+ u32 key = node->key;
+ u32 val = node->val;
+ struct test_node *parent;
+
+ while (*new) {
+ rb_parent = *new;
+ parent = rb_entry(rb_parent, struct test_node, rb);
+ if (parent->augmented < val)
+ parent->augmented = val;
+ if (key < parent->key)
+ new = &parent->rb.rb_left;
+ else
+ new = &parent->rb.rb_right;
+ }
+
+ node->augmented = val;
+ rb_link_node(&node->rb, rb_parent, new);
+ rb_insert_augmented(&node->rb, root, &augment_callbacks);
+}
+
+static void erase_augmented(struct test_node *node, struct rb_root *root)
+{
+ rb_erase_augmented(&node->rb, root, &augment_callbacks);
+}
+
+static void init(void)
+{
+ int i;
+ for (i = 0; i < NODES; i++) {
+ nodes[i].key = prandom32(&rnd);
+ nodes[i].val = prandom32(&rnd);
+ }
+}
+
+static bool is_red(struct rb_node *rb)
+{
+ return !(rb->__rb_parent_color & 1);
+}
+
+static int black_path_count(struct rb_node *rb)
+{
+ int count;
+ for (count = 0; rb; rb = rb_parent(rb))
+ count += !is_red(rb);
+ return count;
+}
+
+static void check(int nr_nodes)
+{
+ struct rb_node *rb;
+ int count = 0;
+ int blacks;
+ u32 prev_key = 0;
+
+ for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
+ struct test_node *node = rb_entry(rb, struct test_node, rb);
+ WARN_ON_ONCE(node->key < prev_key);
+ WARN_ON_ONCE(is_red(rb) &&
+ (!rb_parent(rb) || is_red(rb_parent(rb))));
+ if (!count)
+ blacks = black_path_count(rb);
+ else
+ WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) &&
+ blacks != black_path_count(rb));
+ prev_key = node->key;
+ count++;
+ }
+ WARN_ON_ONCE(count != nr_nodes);
+}
+
+static void check_augmented(int nr_nodes)
+{
+ struct rb_node *rb;
+
+ check(nr_nodes);
+ for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
+ struct test_node *node = rb_entry(rb, struct test_node, rb);
+ WARN_ON_ONCE(node->augmented != augment_recompute(node));
+ }
+}
+
+static int rbtree_test_init(void)
+{
+ int i, j;
+ cycles_t time1, time2, time;
+
+ printk(KERN_ALERT "rbtree testing");
+
+ prandom32_seed(&rnd, 3141592653589793238ULL);
+ init();
+
+ time1 = get_cycles();
+
+ for (i = 0; i < PERF_LOOPS; i++) {
+ for (j = 0; j < NODES; j++)
+ insert(nodes + j, &root);
+ for (j = 0; j < NODES; j++)
+ erase(nodes + j, &root);
+ }
+
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, PERF_LOOPS);
+ printk(" -> %llu cycles\n", (unsigned long long)time);
+
+ for (i = 0; i < CHECK_LOOPS; i++) {
+ init();
+ for (j = 0; j < NODES; j++) {
+ check(j);
+ insert(nodes + j, &root);
+ }
+ for (j = 0; j < NODES; j++) {
+ check(NODES - j);
+ erase(nodes + j, &root);
+ }
+ check(0);
+ }
+
+ printk(KERN_ALERT "augmented rbtree testing");
+
+ init();
+
+ time1 = get_cycles();
+
+ for (i = 0; i < PERF_LOOPS; i++) {
+ for (j = 0; j < NODES; j++)
+ insert_augmented(nodes + j, &root);
+ for (j = 0; j < NODES; j++)
+ erase_augmented(nodes + j, &root);
+ }
+
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, PERF_LOOPS);
+ printk(" -> %llu cycles\n", (unsigned long long)time);
+
+ for (i = 0; i < CHECK_LOOPS; i++) {
+ init();
+ for (j = 0; j < NODES; j++) {
+ check_augmented(j);
+ insert_augmented(nodes + j, &root);
+ }
+ for (j = 0; j < NODES; j++) {
+ check_augmented(NODES - j);
+ erase_augmented(nodes + j, &root);
+ }
+ check_augmented(0);
+ }
+
+ return -EAGAIN; /* failing init unloads the module immediately */
+}
+
+static void rbtree_test_exit(void)
+{
+ printk(KERN_ALERT "test exit\n");
+}
+
+module_init(rbtree_test_init)
+module_exit(rbtree_test_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michel Lespinasse");
+MODULE_DESCRIPTION("Red Black Tree test");
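
A sketch of the pointer packing that is_red() above peeks at, using hypothetical standalone types rather than struct rb_node: nodes are at least 2-byte aligned, so the low bit of the parent-pointer word is free to carry the colour (0 = red, 1 = black, matching the test's `& 1`).

#include <assert.h>
#include <stdint.h>

struct node {
	uintptr_t parent_color;		/* parent pointer | colour bit */
};

static struct node *parent_of(struct node *n)
{
	return (struct node *)(n->parent_color & ~(uintptr_t)1);
}

static int node_is_red(struct node *n)
{
	return !(n->parent_color & 1);
}

int main(void)
{
	struct node p = { 0 };
	struct node red_child   = { (uintptr_t)&p };		/* bit clear */
	struct node black_child = { (uintptr_t)&p | 1 };	/* bit set */

	assert(node_is_red(&red_child) && parent_of(&red_child) == &p);
	assert(!node_is_red(&black_child) && parent_of(&black_child) == &p);
	return 0;
}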
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index fadae774a20c..3675452b23ca 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -39,6 +39,25 @@ struct scatterlist *sg_next(struct scatterlist *sg)
EXPORT_SYMBOL(sg_next);
/**
+ * sg_nents - return total count of entries in scatterlist
+ * @sg: The scatterlist
+ *
+ * Description:
+ * Returns the total number of entries in sg, taking chaining into
+ * account.
+ *
+ **/
+int sg_nents(struct scatterlist *sg)
+{
+ int nents;
+ for (nents = 0; sg; sg = sg_next(sg))
+ nents++;
+ return nents;
+}
+EXPORT_SYMBOL(sg_nents);
+
+
+/**
* sg_last - return the last scatterlist entry in a list
* @sgl: First entry in the scatterlist
* @nents: Number of entries in the scatterlist
@@ -404,14 +423,13 @@ EXPORT_SYMBOL(sg_miter_start);
* @miter: sg mapping iter to proceed
*
* Description:
- * Proceeds @miter@ to the next mapping. @miter@ should have been
- * started using sg_miter_start(). On successful return,
- * @miter@->page, @miter@->addr and @miter@->length point to the
- * current mapping.
+ * Proceeds @miter to the next mapping. @miter should have been started
+ * using sg_miter_start(). On successful return, @miter->page,
+ * @miter->addr and @miter->length point to the current mapping.
*
* Context:
- * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
- * @miter@ is stopped. May sleep if !SG_MITER_ATOMIC.
+ * Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
+ * till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
*
* Returns:
* true if @miter contains the next mapping. false if end of sg
@@ -465,7 +483,8 @@ EXPORT_SYMBOL(sg_miter_next);
* resources (kmap) need to be released during iteration.
*
* Context:
- * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
+ * Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
+ * otherwise.
*/
void sg_miter_stop(struct sg_mapping_iter *miter)
{
@@ -479,7 +498,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
- WARN_ON(!irqs_disabled());
+ WARN_ON_ONCE(preemptible());
kunmap_atomic(miter->addr);
} else
kunmap(miter->page);
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index eb10578ae055..0374a596cffa 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -107,23 +107,27 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
{
u64 i;
u64 loops = loops_per_jiffy * HZ;
- int print_once = 1;
- for (;;) {
- for (i = 0; i < loops; i++) {
- if (arch_spin_trylock(&lock->raw_lock))
- return;
- __delay(1);
- }
- /* lockup suspected: */
- if (print_once) {
- print_once = 0;
- spin_dump(lock, "lockup suspected");
+ for (i = 0; i < loops; i++) {
+ if (arch_spin_trylock(&lock->raw_lock))
+ return;
+ __delay(1);
+ }
+ /* lockup suspected: */
+ spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
- trigger_all_cpu_backtrace();
+ trigger_all_cpu_backtrace();
#endif
- }
- }
+
+ /*
+ * The trylock above was causing a livelock. Give the lower level arch
+ * specific lock code a chance to acquire the lock. We have already
+ * printed a warning/backtrace at this point. The non-debug arch
+ * specific code might actually succeed in acquiring the lock. If it is
+ * not successful, the end-result is the same - there is no forward
+ * progress.
+ */
+ arch_spin_lock(&lock->raw_lock);
}
void do_raw_spin_lock(raw_spinlock_t *lock)
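
A userspace sketch of the new shape of __spin_lock_debug(), assuming pthreads stands in for the arch spinlock: try the lock a bounded number of times, report the suspected lockup once, then fall through to the real blocking acquire instead of retrying forever. LOOPS is an arbitrary stand-in for loops_per_jiffy * HZ.

#include <pthread.h>
#include <stdio.h>

#define LOOPS (1000 * 1000)

static void debug_lock(pthread_mutex_t *lock)
{
	long i;

	for (i = 0; i < LOOPS; i++)
		if (pthread_mutex_trylock(lock) == 0)
			return;

	fprintf(stderr, "lockup suspected\n");
	pthread_mutex_lock(lock);	/* defer to the real lock */
}

int main(void)
{
	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

	debug_lock(&m);			/* uncontended: first trylock wins */
	pthread_mutex_unlock(&m);
	return 0;
}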
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0e337541f005..39c99fea7c03 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -174,35 +174,25 @@ char *put_dec_trunc8(char *buf, unsigned r)
unsigned q;
/* Copy of previous function's body with added early returns */
- q = (r * (uint64_t)0x1999999a) >> 32;
- *buf++ = (r - 10 * q) + '0'; /* 2 */
- if (q == 0)
- return buf;
- r = (q * (uint64_t)0x1999999a) >> 32;
- *buf++ = (q - 10 * r) + '0'; /* 3 */
- if (r == 0)
- return buf;
- q = (r * (uint64_t)0x1999999a) >> 32;
- *buf++ = (r - 10 * q) + '0'; /* 4 */
- if (q == 0)
- return buf;
- r = (q * (uint64_t)0x1999999a) >> 32;
- *buf++ = (q - 10 * r) + '0'; /* 5 */
- if (r == 0)
- return buf;
- q = (r * 0x199a) >> 16;
- *buf++ = (r - 10 * q) + '0'; /* 6 */
+ while (r >= 10000) {
+ q = r + '0';
+ r = (r * (uint64_t)0x1999999a) >> 32;
+ *buf++ = q - 10*r;
+ }
+
+ q = (r * 0x199a) >> 16; /* r <= 9999 */
+ *buf++ = (r - 10 * q) + '0';
if (q == 0)
return buf;
- r = (q * 0xcd) >> 11;
- *buf++ = (q - 10 * r) + '0'; /* 7 */
+ r = (q * 0xcd) >> 11; /* q <= 999 */
+ *buf++ = (q - 10 * r) + '0';
if (r == 0)
return buf;
- q = (r * 0xcd) >> 11;
- *buf++ = (r - 10 * q) + '0'; /* 8 */
+ q = (r * 0xcd) >> 11; /* r <= 99 */
+ *buf++ = (r - 10 * q) + '0';
if (q == 0)
return buf;
- *buf++ = q + '0'; /* 9 */
+ *buf++ = q + '0'; /* q <= 9 */
return buf;
}
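
The three magic constants in put_dec_trunc8() are fixed-point reciprocals of 10, each valid only for the range its operand can take at that point. A brute-force check, as a standalone program (the 8-digit bound covers every value the function is handed):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t x;

	for (x = 0; x < 100000000; x++)		/* up to 8 digits */
		assert(((x * (uint64_t)0x1999999a) >> 32) == x / 10);
	for (x = 0; x <= 9999; x++)
		assert(((x * 0x199a) >> 16) == x / 10);
	for (x = 0; x <= 999; x++)
		assert(((x * 0xcd) >> 11) == x / 10);
	printf("all three constants verified\n");
	return 0;
}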
@@ -243,18 +233,34 @@ char *put_dec(char *buf, unsigned long long n)
/* Second algorithm: valid only for 64-bit long longs */
+/* See comment in put_dec_full9 for choice of constants */
static noinline_for_stack
-char *put_dec_full4(char *buf, unsigned q)
+void put_dec_full4(char *buf, unsigned q)
{
unsigned r;
- r = (q * 0xcccd) >> 19;
- *buf++ = (q - 10 * r) + '0';
- q = (r * 0x199a) >> 16;
- *buf++ = (r - 10 * q) + '0';
+ r = (q * 0xccd) >> 15;
+ buf[0] = (q - 10 * r) + '0';
+ q = (r * 0xcd) >> 11;
+ buf[1] = (r - 10 * q) + '0';
r = (q * 0xcd) >> 11;
- *buf++ = (q - 10 * r) + '0';
- *buf++ = r + '0';
- return buf;
+ buf[2] = (q - 10 * r) + '0';
+ buf[3] = r + '0';
+}
+
+/*
+ * Call put_dec_full4 on x % 10000, return x / 10000.
+ * The approximation x/10000 == (x * 0x346DC5D7) >> 43
+ * holds for all x < 1,128,869,999. The largest value this
+ * helper will ever be asked to convert is 1,125,520,955.
+ * (d1 in the put_dec code, assuming n is all-ones).
+ */
+static
+unsigned put_dec_helper4(char *buf, unsigned x)
+{
+ uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43;
+
+ put_dec_full4(buf, x - q * 10000);
+ return q;
}
/* Based on code by Douglas W. Jones found at
@@ -276,28 +282,19 @@ char *put_dec(char *buf, unsigned long long n)
d3 = (h >> 16); /* implicit "& 0xffff" */
q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
+ q = put_dec_helper4(buf, q);
+
+ q += 7671 * d3 + 9496 * d2 + 6 * d1;
+ q = put_dec_helper4(buf+4, q);
+
+ q += 4749 * d3 + 42 * d2;
+ q = put_dec_helper4(buf+8, q);
- buf = put_dec_full4(buf, q % 10000);
- q = q / 10000;
-
- d1 = q + 7671 * d3 + 9496 * d2 + 6 * d1;
- buf = put_dec_full4(buf, d1 % 10000);
- q = d1 / 10000;
-
- d2 = q + 4749 * d3 + 42 * d2;
- buf = put_dec_full4(buf, d2 % 10000);
- q = d2 / 10000;
-
- d3 = q + 281 * d3;
- if (!d3)
- goto done;
- buf = put_dec_full4(buf, d3 % 10000);
- q = d3 / 10000;
- if (!q)
- goto done;
- buf = put_dec_full4(buf, q);
- done:
- while (buf[-1] == '0')
+ q += 281 * d3;
+ buf += 12;
+ if (q)
+ buf = put_dec_trunc8(buf, q);
+ else while (buf[-1] == '0')
--buf;
return buf;
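
The comment on put_dec_helper4() above states its own validity bound, which a standalone program can check exhaustively in a few seconds (the bound used is the one the comment claims, not the largest value for which the identity happens to hold):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t x;

	for (x = 0; x < 1128869999u; x++)
		assert(((x * (uint64_t)0x346DC5D7) >> 43) == x / 10000);
	printf("x/10000 == (x * 0x346DC5D7) >> 43 for all x < 1128869999\n");
	return 0;
}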
@@ -990,7 +987,7 @@ int kptr_restrict __read_mostly;
* - 'm' For a 6-byte MAC address, it prints the hex address without colons
* - 'MF' For a 6-byte MAC FDDI address, it prints the address
* with a dash-separated hex notation
- * - '[mM]R For a 6-byte MAC address, Reverse order (Bluetooth)
+ * - '[mM]R' For a 6-byte MAC address, Reverse order (Bluetooth)
* - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
* IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
* IPv6 uses colon separated network-order 16 bit hex with leading 0's
@@ -1341,7 +1338,10 @@ qualifier:
* %pR output the address range in a struct resource with decoded flags
* %pr output the address range in a struct resource with raw flags
* %pM output a 6-byte MAC address with colons
+ * %pMR output a 6-byte MAC address with colons in reversed order
+ * %pMF output a 6-byte MAC address with dashes
* %pm output a 6-byte MAC address without colons
+ * %pmR output a 6-byte MAC address without colons in reversed order
* %pI4 print an IPv4 address without leading zeros
* %pi4 print an IPv4 address with leading zeros
* %pI6 print an IPv6 address with colons
@@ -2017,7 +2017,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
s16 field_width;
bool is_sign;
- while (*fmt && *str) {
+ while (*fmt) {
/* skip any white space in format */
 /* white space in format matches any amount of
* white space, including none, in the input.
@@ -2042,6 +2042,8 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
* advance both strings to next white space
*/
if (*fmt == '*') {
+ if (!*str)
+ break;
while (!isspace(*fmt) && *fmt != '%' && *fmt)
fmt++;
while (!isspace(*str) && *str)
@@ -2070,7 +2072,17 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
}
}
- if (!*fmt || !*str)
+ if (!*fmt)
+ break;
+
+ if (*fmt == 'n') {
+ /* return number of characters read so far */
+ *va_arg(args, int *) = str - buf;
+ ++fmt;
+ continue;
+ }
+
+ if (!*str)
break;
base = 10;
@@ -2103,13 +2115,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
num++;
}
continue;
- case 'n':
- /* return number of characters read so far */
- {
- int *i = (int *)va_arg(args, int*);
- *i = str - buf;
- }
- continue;
case 'o':
base = 8;
break;
@@ -2210,16 +2215,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
str = next;
}
- /*
- * Now we've come all the way through so either the input string or the
- * format ended. In the former case, there can be a %n at the current
- * position in the format that needs to be filled.
- */
- if (*fmt == '%' && *(fmt + 1) == 'n') {
- int *p = (int *)va_arg(args, int *);
- *p = str - buf;
- }
-
return num;
}
EXPORT_SYMBOL(vsscanf);
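
The reordering above lets a trailing %n fire even when the input string has been exhausted. Userspace sscanf() already has these semantics, so a small standalone program shows the behaviour vsscanf() now matches: %n stores the number of characters consumed so far and does not count as a conversion.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int n = -1, v = 0;

	/* input fully consumed, yet %n still stores 3; return is 0
	 * because %n performs no conversion */
	assert(sscanf("abc", "abc%n", &n) == 0);
	assert(n == 3);

	n = -1;
	assert(sscanf("42xyz", "%d%n", &v, &n) == 1);
	assert(v == 42 && n == 2);
	return 0;
}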