Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                  |  23
-rw-r--r--  lib/Kconfig.debug            |  38
-rw-r--r--  lib/Kconfig.kasan            |   4
-rw-r--r--  lib/Makefile                 |   7
-rw-r--r--  lib/asn1_decoder.c           |  27
-rw-r--r--  lib/atomic64.c               |   3
-rw-r--r--  lib/atomic64_test.c          |  68
-rw-r--r--  lib/average.c                |  64
-rw-r--r--  lib/bitmap.c                 |  43
-rw-r--r--  lib/decompress.c             |   5
-rw-r--r--  lib/decompress_bunzip2.c     |   6
-rw-r--r--  lib/decompress_inflate.c     |  31
-rw-r--r--  lib/decompress_unlz4.c       |   6
-rw-r--r--  lib/decompress_unlzma.c      |   9
-rw-r--r--  lib/decompress_unlzo.c       |  13
-rw-r--r--  lib/decompress_unxz.c        |  12
-rw-r--r--  lib/devres.c                 |  13
-rw-r--r--  lib/dma-debug.c              |   3
-rw-r--r--  lib/genalloc.c               | 110
-rw-r--r--  lib/hexdump.c                |   7
-rw-r--r--  lib/iommu-common.c           |   8
-rw-r--r--  lib/klist.c                  |  41
-rw-r--r--  lib/kobject.c                |   5
-rw-r--r--  lib/kstrtox.c                |   2
-rw-r--r--  lib/lockref.c                |   8
-rw-r--r--  lib/mpi/mpicoder.c           |  38
-rw-r--r--  lib/nmi_backtrace.c          | 162
-rw-r--r--  lib/pci_iomap.c              |  73
-rw-r--r--  lib/raid6/neon.c             |  13
-rw-r--r--  lib/raid6/neon.uc            |  46
-rw-r--r--  lib/rhashtable.c             |   9
-rw-r--r--  lib/scatterlist.c            |   4
-rw-r--r--  lib/sg_split.c               | 202
-rw-r--r--  lib/show_mem.c               |   4
-rw-r--r--  lib/string_helpers.c         |  26
-rw-r--r--  lib/test-kstrtox.c           |   6
-rw-r--r--  lib/test_bpf.c               | 817
-rw-r--r--  lib/test_kasan.c             |   6
-rw-r--r--  lib/test_rhashtable.c        | 163
-rw-r--r--  lib/test_static_key_base.c   |  68
-rw-r--r--  lib/test_static_keys.c       | 225
-rw-r--r--  lib/vsprintf.c               |   1
-rw-r--r--  lib/zlib_deflate/deftree.c   |   6
-rw-r--r--  lib/zlib_deflate/defutil.h   |  16
44 files changed, 2144 insertions(+), 297 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 3a2ef67db6c7..2e491ac15622 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -53,9 +53,6 @@ config GENERIC_IO
config STMP_DEVICE
bool
-config PERCPU_RWSEM
- bool
-
config ARCH_USE_CMPXCHG_LOCKREF
bool
@@ -460,16 +457,6 @@ config ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
config LRU_CACHE
tristate
-config AVERAGE
- bool "Averaging functions"
- help
- This option is provided for the case where no in-kernel-tree
- modules require averaging functions, but a module built outside
- the kernel tree does. Such modules that use library averaging
- functions require Y here.
-
- If unsure, say N.
-
config CLZ_TAB
bool
@@ -521,6 +508,13 @@ config UCS2_STRING
source "lib/fonts/Kconfig"
+config SG_SPLIT
+ def_bool n
+ help
+ Provides a helper to split scatterlists into chunks, each chunk being a
+ scatterlist. This should be selected by a driver or an API which
+ wishes to split a scatterlist amongst multiple DMA channels.
+
#
# sg chaining option
#
@@ -531,4 +525,7 @@ config ARCH_HAS_SG_CHAIN
config ARCH_HAS_PMEM_API
bool
+config ARCH_HAS_MMIO_FLUSH
+ bool
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e2894b23efb6..ab76b99adc85 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -916,12 +916,6 @@ config DEBUG_RT_MUTEXES
This allows rt mutex semantics violations and rt mutex related
deadlocks (lockups) to be detected and reported automatically.
-config RT_MUTEX_TESTER
- bool "Built-in scriptable tester for rt-mutexes"
- depends on DEBUG_KERNEL && RT_MUTEXES && BROKEN
- help
- This option enables a rt-mutex tester.
-
config DEBUG_SPINLOCK
bool "Spinlock and rw-lock debugging: basic checks"
depends on DEBUG_KERNEL
@@ -1353,20 +1347,6 @@ config RCU_CPU_STALL_TIMEOUT
RCU grace period persists, additional CPU stall warnings are
printed at more widely spaced intervals.
-config RCU_CPU_STALL_INFO
- bool "Print additional diagnostics on RCU CPU stall"
- depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL
- default y
- help
- For each stalled CPU that is aware of the current RCU grace
- period, print out additional per-CPU diagnostic information
- regarding scheduling-clock ticks, idle state, and,
- for RCU_FAST_NO_HZ kernels, idle-entry state.
-
- Say N if you are unsure.
-
- Say Y if you want to enable such diagnostics.
-
config RCU_TRACE
bool "Enable tracing for RCU"
depends on DEBUG_KERNEL
@@ -1379,7 +1359,7 @@ config RCU_TRACE
Say N if you are unsure.
config RCU_EQS_DEBUG
- bool "Use this when adding any sort of NO_HZ support to your arch"
+ bool "Provide debugging asserts for adding NO_HZ support to an arch"
depends on DEBUG_KERNEL
help
This option provides consistency checks in RCU's handling of
@@ -1542,6 +1522,13 @@ config FAIL_MMC_REQUEST
and to test how the mmc host driver handles retries from
the block device.
+config FAIL_FUTEX
+ bool "Fault-injection capability for futexes"
+ select DEBUG_FS
+ depends on FAULT_INJECTION && FUTEX
+ help
+ Provide fault-injection capability for futexes.
+
config FAULT_INJECTION_DEBUG_FS
bool "Debugfs entries for fault-injection capabilities"
depends on FAULT_INJECTION && SYSFS && DEBUG_FS
@@ -1840,6 +1827,15 @@ config MEMTEST
memtest=17, mean do 17 test patterns.
If you are unsure how to answer this question, answer N.
+config TEST_STATIC_KEYS
+ tristate "Test static keys"
+ default n
+ depends on m
+ help
+ Test the static key interfaces.
+
+ If unsure, say N.
+
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 777eda7d1ab4..39f24d6721e5 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -18,10 +18,6 @@ config KASAN
For better error detection enable CONFIG_STACKTRACE,
and add slub_debug=U to boot cmdline.
-config KASAN_SHADOW_OFFSET
- hex
- default 0xdffffc0000000000 if X86_64
-
choice
prompt "Instrumentation type"
depends on KASAN
diff --git a/lib/Makefile b/lib/Makefile
index 6897b527581a..13a7c6ae3fec 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
sha1.o md5.o irq_regs.o argv_split.o \
proportions.o flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
- earlycpio.o seq_buf.o
+ earlycpio.o seq_buf.o nmi_backtrace.o
obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
lib-$(CONFIG_MMU) += ioremap.o
@@ -39,6 +39,8 @@ obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
+obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
+obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -138,8 +140,6 @@ obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
-obj-$(CONFIG_AVERAGE) += average.o
-
obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
obj-$(CONFIG_CORDIC) += cordic.o
@@ -160,6 +160,7 @@ obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
+obj-$(CONFIG_SG_SPLIT) += sg_split.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 1a000bb050f9..2b3f46c049d4 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -24,15 +24,20 @@ static const unsigned char asn1_op_lengths[ASN1_OP__NR] = {
[ASN1_OP_MATCH_JUMP] = 1 + 1 + 1,
[ASN1_OP_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1,
[ASN1_OP_MATCH_ANY] = 1,
+ [ASN1_OP_MATCH_ANY_OR_SKIP] = 1,
[ASN1_OP_MATCH_ANY_ACT] = 1 + 1,
+ [ASN1_OP_MATCH_ANY_ACT_OR_SKIP] = 1 + 1,
[ASN1_OP_COND_MATCH_OR_SKIP] = 1 + 1,
[ASN1_OP_COND_MATCH_ACT_OR_SKIP] = 1 + 1 + 1,
[ASN1_OP_COND_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1,
[ASN1_OP_COND_MATCH_ANY] = 1,
+ [ASN1_OP_COND_MATCH_ANY_OR_SKIP] = 1,
[ASN1_OP_COND_MATCH_ANY_ACT] = 1 + 1,
+ [ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP] = 1 + 1,
[ASN1_OP_COND_FAIL] = 1,
[ASN1_OP_COMPLETE] = 1,
[ASN1_OP_ACT] = 1 + 1,
+ [ASN1_OP_MAYBE_ACT] = 1 + 1,
[ASN1_OP_RETURN] = 1,
[ASN1_OP_END_SEQ] = 1,
[ASN1_OP_END_SEQ_OF] = 1 + 1,
@@ -177,6 +182,7 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
unsigned char flags = 0;
#define FLAG_INDEFINITE_LENGTH 0x01
#define FLAG_MATCHED 0x02
+#define FLAG_LAST_MATCHED 0x04 /* Last tag matched */
#define FLAG_CONS 0x20 /* Corresponds to CONS bit in the opcode tag
* - ie. whether or not we are going to parse
* a compound type.
@@ -208,9 +214,9 @@ next_op:
unsigned char tmp;
/* Skip conditional matches if possible */
- if ((op & ASN1_OP_MATCH__COND &&
- flags & FLAG_MATCHED) ||
- dp == datalen) {
+ if ((op & ASN1_OP_MATCH__COND && flags & FLAG_MATCHED) ||
+ (op & ASN1_OP_MATCH__SKIP && dp == datalen)) {
+ flags &= ~FLAG_LAST_MATCHED;
pc += asn1_op_lengths[op];
goto next_op;
}
@@ -302,7 +308,9 @@ next_op:
/* Decide how to handle the operation */
switch (op) {
case ASN1_OP_MATCH_ANY_ACT:
+ case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
case ASN1_OP_COND_MATCH_ANY_ACT:
+ case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
if (ret < 0)
return ret;
@@ -319,8 +327,10 @@ next_op:
case ASN1_OP_MATCH:
case ASN1_OP_MATCH_OR_SKIP:
case ASN1_OP_MATCH_ANY:
+ case ASN1_OP_MATCH_ANY_OR_SKIP:
case ASN1_OP_COND_MATCH_OR_SKIP:
case ASN1_OP_COND_MATCH_ANY:
+ case ASN1_OP_COND_MATCH_ANY_OR_SKIP:
skip_data:
if (!(flags & FLAG_CONS)) {
if (flags & FLAG_INDEFINITE_LENGTH) {
@@ -422,8 +432,15 @@ next_op:
pc += asn1_op_lengths[op];
goto next_op;
+ case ASN1_OP_MAYBE_ACT:
+ if (!(flags & FLAG_LAST_MATCHED)) {
+ pc += asn1_op_lengths[op];
+ goto next_op;
+ }
case ASN1_OP_ACT:
ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len);
+ if (ret < 0)
+ return ret;
pc += asn1_op_lengths[op];
goto next_op;
@@ -431,6 +448,7 @@ next_op:
if (unlikely(jsp <= 0))
goto jump_stack_underflow;
pc = jump_stack[--jsp];
+ flags |= FLAG_MATCHED | FLAG_LAST_MATCHED;
goto next_op;
default:
@@ -438,7 +456,8 @@ next_op:
}
/* Shouldn't reach here */
- pr_err("ASN.1 decoder error: Found reserved opcode (%u)\n", op);
+ pr_err("ASN.1 decoder error: Found reserved opcode (%u) pc=%zu\n",
+ op, pc);
return -EBADMSG;
data_overrun_error:
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 1298c05ef528..2886ebac6567 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -102,6 +102,9 @@ EXPORT_SYMBOL(atomic64_##op##_return);
ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
+ATOMIC64_OP(and, &=)
+ATOMIC64_OP(or, |=)
+ATOMIC64_OP(xor, ^=)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 0211d30d8c39..83c33a5bcffb 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -16,8 +16,39 @@
#include <linux/kernel.h>
#include <linux/atomic.h>
+#define TEST(bit, op, c_op, val) \
+do { \
+ atomic##bit##_set(&v, v0); \
+ r = v0; \
+ atomic##bit##_##op(val, &v); \
+ r c_op val; \
+ WARN(atomic##bit##_read(&v) != r, "%Lx != %Lx\n", \
+ (unsigned long long)atomic##bit##_read(&v), \
+ (unsigned long long)r); \
+} while (0)
+
+static __init void test_atomic(void)
+{
+ int v0 = 0xaaa31337;
+ int v1 = 0xdeadbeef;
+ int onestwos = 0x11112222;
+ int one = 1;
+
+ atomic_t v;
+ int r;
+
+ TEST(, add, +=, onestwos);
+ TEST(, add, +=, -one);
+ TEST(, sub, -=, onestwos);
+ TEST(, sub, -=, -one);
+ TEST(, or, |=, v1);
+ TEST(, and, &=, v1);
+ TEST(, xor, ^=, v1);
+ TEST(, andnot, &= ~, v1);
+}
+
#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
-static __init int test_atomic64(void)
+static __init void test_atomic64(void)
{
long long v0 = 0xaaa31337c001d00dLL;
long long v1 = 0xdeadbeefdeafcafeLL;
@@ -34,15 +65,14 @@ static __init int test_atomic64(void)
BUG_ON(v.counter != r);
BUG_ON(atomic64_read(&v) != r);
- INIT(v0);
- atomic64_add(onestwos, &v);
- r += onestwos;
- BUG_ON(v.counter != r);
-
- INIT(v0);
- atomic64_add(-one, &v);
- r += -one;
- BUG_ON(v.counter != r);
+ TEST(64, add, +=, onestwos);
+ TEST(64, add, +=, -one);
+ TEST(64, sub, -=, onestwos);
+ TEST(64, sub, -=, -one);
+ TEST(64, or, |=, v1);
+ TEST(64, and, &=, v1);
+ TEST(64, xor, ^=, v1);
+ TEST(64, andnot, &= ~, v1);
INIT(v0);
r += onestwos;
@@ -55,16 +85,6 @@ static __init int test_atomic64(void)
BUG_ON(v.counter != r);
INIT(v0);
- atomic64_sub(onestwos, &v);
- r -= onestwos;
- BUG_ON(v.counter != r);
-
- INIT(v0);
- atomic64_sub(-one, &v);
- r -= -one;
- BUG_ON(v.counter != r);
-
- INIT(v0);
r -= onestwos;
BUG_ON(atomic64_sub_return(onestwos, &v) != r);
BUG_ON(v.counter != r);
@@ -147,6 +167,12 @@ static __init int test_atomic64(void)
BUG_ON(!atomic64_inc_not_zero(&v));
r += one;
BUG_ON(v.counter != r);
+}
+
+static __init int test_atomics(void)
+{
+ test_atomic();
+ test_atomic64();
#ifdef CONFIG_X86
pr_info("passed for %s platform %s CX8 and %s SSE\n",
@@ -166,4 +192,4 @@ static __init int test_atomic64(void)
return 0;
}
-core_initcall(test_atomic64);
+core_initcall(test_atomics);
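For reference, one TEST() invocation above unrolls roughly as follows (a hand expansion of the macro as written, shown for the 64-bit add case):

    atomic64_set(&v, v0);                /* seed the atomic with the start value  */
    r = v0;                              /* keep a plain mirror of the same value */
    atomic64_add(onestwos, &v);          /* atomic##bit##_##op(val, &v)           */
    r += onestwos;                       /* r c_op val                            */
    WARN(atomic64_read(&v) != r, "%Lx != %Lx\n",
         (unsigned long long)atomic64_read(&v),
         (unsigned long long)r);         /* a mismatch flags a broken atomic op   */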
diff --git a/lib/average.c b/lib/average.c
deleted file mode 100644
index 114d1beae0c7..000000000000
--- a/lib/average.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * lib/average.c
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
- */
-
-#include <linux/export.h>
-#include <linux/average.h>
-#include <linux/kernel.h>
-#include <linux/bug.h>
-#include <linux/log2.h>
-
-/**
- * DOC: Exponentially Weighted Moving Average (EWMA)
- *
- * These are generic functions for calculating Exponentially Weighted Moving
- * Averages (EWMA). We keep a structure with the EWMA parameters and a scaled
- * up internal representation of the average value to prevent rounding errors.
- * The factor for scaling up and the exponential weight (or decay rate) have to
- * be specified thru the init fuction. The structure should not be accessed
- * directly but only thru the helper functions.
- */
-
-/**
- * ewma_init() - Initialize EWMA parameters
- * @avg: Average structure
- * @factor: Factor to use for the scaled up internal value. The maximum value
- * of averages can be ULONG_MAX/(factor*weight). For performance reasons
- * factor has to be a power of 2.
- * @weight: Exponential weight, or decay rate. This defines how fast the
- * influence of older values decreases. For performance reasons weight has
- * to be a power of 2.
- *
- * Initialize the EWMA parameters for a given struct ewma @avg.
- */
-void ewma_init(struct ewma *avg, unsigned long factor, unsigned long weight)
-{
- WARN_ON(!is_power_of_2(weight) || !is_power_of_2(factor));
-
- avg->weight = ilog2(weight);
- avg->factor = ilog2(factor);
- avg->internal = 0;
-}
-EXPORT_SYMBOL(ewma_init);
-
-/**
- * ewma_add() - Exponentially weighted moving average (EWMA)
- * @avg: Average structure
- * @val: Current value
- *
- * Add a sample to the average.
- */
-struct ewma *ewma_add(struct ewma *avg, unsigned long val)
-{
- unsigned long internal = ACCESS_ONCE(avg->internal);
-
- ACCESS_ONCE(avg->internal) = internal ?
- (((internal << avg->weight) - internal) +
- (val << avg->factor)) >> avg->weight :
- (val << avg->factor);
- return avg;
-}
-EXPORT_SYMBOL(ewma_add);
diff --git a/lib/bitmap.c b/lib/bitmap.c
index a578a0189199..814814397cce 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -367,7 +367,8 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
nchunks = nbits = totaldigits = c = 0;
do {
- chunk = ndigits = 0;
+ chunk = 0;
+ ndigits = totaldigits;
/* Get the next chunk of the bitmap */
while (buflen) {
@@ -406,9 +407,9 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
return -EOVERFLOW;
chunk = (chunk << 4) | hex_to_bin(c);
- ndigits++; totaldigits++;
+ totaldigits++;
}
- if (ndigits == 0)
+ if (ndigits == totaldigits)
return -EINVAL;
if (nchunks == 0 && chunk == 0)
continue;
@@ -505,7 +506,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
int nmaskbits)
{
unsigned a, b;
- int c, old_c, totaldigits;
+ int c, old_c, totaldigits, ndigits;
const char __user __force *ubuf = (const char __user __force *)buf;
int at_start, in_range;
@@ -515,6 +516,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
at_start = 1;
in_range = 0;
a = b = 0;
+ ndigits = totaldigits;
/* Get the next cpu# or a range of cpu#'s */
while (buflen) {
@@ -528,23 +530,27 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
if (isspace(c))
continue;
- /*
- * If the last character was a space and the current
- * character isn't '\0', we've got embedded whitespace.
- * This is a no-no, so throw an error.
- */
- if (totaldigits && c && isspace(old_c))
- return -EINVAL;
-
/* A '\0' or a ',' signal the end of a cpu# or range */
if (c == '\0' || c == ',')
break;
+ /*
+ * Whitespace between digits is not allowed, but it is ok if the
+ * whitespace is leading or trailing.  When old_c is whitespace:
+ * if totaldigits == ndigits, the whitespace is leading; trailing
+ * whitespace never reaches this point, because c would have been
+ * ',' or '\0' and the loop above would already have ended.
+ */
+ if ((totaldigits != ndigits) && isspace(old_c))
+ return -EINVAL;
if (c == '-') {
if (at_start || in_range)
return -EINVAL;
b = 0;
in_range = 1;
+ at_start = 1;
continue;
}
@@ -557,15 +563,18 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
at_start = 0;
totaldigits++;
}
+ if (ndigits == totaldigits)
+ continue;
+ /* if no digit follows '-', it is an error */
+ if (at_start && in_range)
+ return -EINVAL;
if (!(a <= b))
return -EINVAL;
if (b >= nmaskbits)
return -ERANGE;
- if (!at_start) {
- while (a <= b) {
- set_bit(a, maskp);
- a++;
- }
+ while (a <= b) {
+ set_bit(a, maskp);
+ a++;
}
} while (buflen && c == ',');
return 0;
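To illustrate the new whitespace and range rules, a few hypothetical bitmap_parselist() calls and their outcomes (the inputs and mask size are invented for the example):

    DECLARE_BITMAP(mask, 64);

    bitmap_parselist("1-3, 6", mask, 64);   /* ok: space after ',' counts as leading   */
    bitmap_parselist(" 1-3,6 ", mask, 64);  /* ok: leading/trailing whitespace allowed */
    bitmap_parselist("1 2", mask, 64);      /* -EINVAL: whitespace between digits      */
    bitmap_parselist("1-", mask, 64);       /* -EINVAL: no digit follows '-'           */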
diff --git a/lib/decompress.c b/lib/decompress.c
index 528ff932d8e4..62696dff5730 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -59,8 +59,11 @@ decompress_fn __init decompress_method(const unsigned char *inbuf, long len,
{
const struct compress_format *cf;
- if (len < 2)
+ if (len < 2) {
+ if (name)
+ *name = NULL;
return NULL; /* Need at least this much... */
+ }
pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]);
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 6dd0335ea61b..0234361b24b8 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -743,12 +743,12 @@ exit_0:
}
#ifdef PREBOOT
-STATIC int INIT decompress(unsigned char *buf, long len,
+STATIC int INIT __decompress(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
- unsigned char *outbuf,
+ unsigned char *outbuf, long olen,
long *pos,
- void(*error)(char *x))
+ void (*error)(char *x))
{
return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error);
}
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index d4c7891635ec..555c06bf20da 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -1,4 +1,5 @@
#ifdef STATIC
+#define PREBOOT
/* Pre-boot environment: included */
/* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots
@@ -33,23 +34,23 @@ static long INIT nofill(void *buffer, unsigned long len)
}
/* Included from initramfs et al code */
-STATIC int INIT gunzip(unsigned char *buf, long len,
+STATIC int INIT __gunzip(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
- unsigned char *out_buf,
+ unsigned char *out_buf, long out_len,
long *pos,
void(*error)(char *x)) {
u8 *zbuf;
struct z_stream_s *strm;
int rc;
- size_t out_len;
rc = -1;
if (flush) {
out_len = 0x8000; /* 32 K */
out_buf = malloc(out_len);
} else {
- out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
+ if (!out_len)
+ out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
}
if (!out_buf) {
error("Out of memory while allocating output buffer");
@@ -181,4 +182,24 @@ gunzip_nomem1:
return rc; /* returns Z_OK (0) if successful */
}
-#define decompress gunzip
+#ifndef PREBOOT
+STATIC int INIT gunzip(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf,
+ long *pos,
+ void (*error)(char *x))
+{
+ return __gunzip(buf, len, fill, flush, out_buf, 0, pos, error);
+}
+#else
+STATIC int INIT __decompress(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long out_len,
+ long *pos,
+ void (*error)(char *x))
+{
+ return __gunzip(buf, len, fill, flush, out_buf, out_len, pos, error);
+}
+#endif
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index 40f66ebe57b7..036fc882cd72 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -196,12 +196,12 @@ exit_0:
}
#ifdef PREBOOT
-STATIC int INIT decompress(unsigned char *buf, long in_len,
+STATIC int INIT __decompress(unsigned char *buf, long in_len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
- unsigned char *output,
+ unsigned char *output, long out_len,
long *posp,
- void(*error)(char *x)
+ void (*error)(char *x)
)
{
return unlz4(buf, in_len - 4, fill, flush, output, posp, error);
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 0be83af62b88..ed7a1fd819f2 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -620,7 +620,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, long in_len,
num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
p = (uint16_t *) large_malloc(num_probs * sizeof(*p));
- if (p == 0)
+ if (p == NULL)
goto exit_2;
num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
for (i = 0; i < num_probs; i++)
@@ -667,13 +667,12 @@ exit_0:
}
#ifdef PREBOOT
-STATIC int INIT decompress(unsigned char *buf, long in_len,
+STATIC int INIT __decompress(unsigned char *buf, long in_len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
- unsigned char *output,
+ unsigned char *output, long out_len,
long *posp,
- void(*error)(char *x)
- )
+ void (*error)(char *x))
{
return unlzma(buf, in_len - 4, fill, flush, output, posp, error);
}
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index b94a31bdd87d..f4c158e3a022 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -31,6 +31,7 @@
*/
#ifdef STATIC
+#define PREBOOT
#include "lzo/lzo1x_decompress_safe.c"
#else
#include <linux/decompress/unlzo.h>
@@ -287,4 +288,14 @@ exit:
return ret;
}
-#define decompress unlzo
+#ifdef PREBOOT
+STATIC int INIT __decompress(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long olen,
+ long *pos,
+ void (*error)(char *x))
+{
+ return unlzo(buf, len, fill, flush, out_buf, pos, error);
+}
+#endif
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
index b07a78340e9d..25d59a95bd66 100644
--- a/lib/decompress_unxz.c
+++ b/lib/decompress_unxz.c
@@ -394,4 +394,14 @@ error_alloc_state:
* This macro is used by architecture-specific files to decompress
* the kernel image.
*/
-#define decompress unxz
+#ifdef XZ_PREBOOT
+STATIC int INIT __decompress(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long olen,
+ long *pos,
+ void (*error)(char *x))
+{
+ return unxz(buf, len, fill, flush, out_buf, pos, error);
+}
+#endif
diff --git a/lib/devres.c b/lib/devres.c
index fbe2aac522e6..f13a2468ff39 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -119,10 +119,9 @@ EXPORT_SYMBOL(devm_iounmap);
* @dev: generic device to handle the resource for
* @res: resource to be handled
*
- * Checks that a resource is a valid memory region, requests the memory region
- * and ioremaps it either as cacheable or as non-cacheable memory depending on
- * the resource's flags. All operations are managed and will be undone on
- * driver detach.
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps it. All operations are managed and will be undone
+ * on driver detach.
*
* Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
* on failure. Usage example:
@@ -153,11 +152,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
return IOMEM_ERR_PTR(-EBUSY);
}
- if (res->flags & IORESOURCE_CACHEABLE)
- dest_ptr = devm_ioremap(dev, res->start, size);
- else
- dest_ptr = devm_ioremap_nocache(dev, res->start, size);
-
+ dest_ptr = devm_ioremap(dev, res->start, size);
if (!dest_ptr) {
dev_err(dev, "ioremap failed for resource %pR\n", res);
devm_release_mem_region(dev, res->start, size);
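Callers are unchanged; a minimal sketch of the usual pattern, with the behavioural note that IORESOURCE_CACHEABLE no longer selects a cached mapping (pdev below is a hypothetical platform device):

    struct resource *res;
    void __iomem *base;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(&pdev->dev, res);  /* now always plain devm_ioremap() */
    if (IS_ERR(base))
            return PTR_ERR(base);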
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ae4b65e17e64..dace71fe41f7 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -574,6 +574,9 @@ void debug_dma_assert_idle(struct page *page)
unsigned long flags;
phys_addr_t cln;
+ if (dma_debug_disabled())
+ return;
+
if (!page)
return;
diff --git a/lib/genalloc.c b/lib/genalloc.c
index daf0afb6d979..116a166b096f 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -160,6 +160,7 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
pool->min_alloc_order = min_alloc_order;
pool->algo = gen_pool_first_fit;
pool->data = NULL;
+ pool->name = NULL;
}
return pool;
}
@@ -252,8 +253,8 @@ void gen_pool_destroy(struct gen_pool *pool)
kfree(chunk);
}
+ kfree_const(pool->name);
kfree(pool);
- return;
}
EXPORT_SYMBOL(gen_pool_destroy);
@@ -570,53 +571,88 @@ static void devm_gen_pool_release(struct device *dev, void *res)
gen_pool_destroy(*(struct gen_pool **)res);
}
+static int devm_gen_pool_match(struct device *dev, void *res, void *data)
+{
+ struct gen_pool **p = res;
+
+ /* NULL data matches only a pool without an assigned name */
+ if (!data && !(*p)->name)
+ return 1;
+
+ if (!data || !(*p)->name)
+ return 0;
+
+ return !strcmp((*p)->name, data);
+}
+
+/**
+ * gen_pool_get - Obtain the gen_pool (if any) for a device
+ * @dev: device to retrieve the gen_pool from
+ * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
+ *
+ * Returns the gen_pool for the device if one is present, or NULL.
+ */
+struct gen_pool *gen_pool_get(struct device *dev, const char *name)
+{
+ struct gen_pool **p;
+
+ p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
+ (void *)name);
+ if (!p)
+ return NULL;
+ return *p;
+}
+EXPORT_SYMBOL_GPL(gen_pool_get);
+
/**
* devm_gen_pool_create - managed gen_pool_create
* @dev: device that provides the gen_pool
* @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
- * @nid: node id of the node the pool structure should be allocated on, or -1
+ * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
+ * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
*
* Create a new special memory pool that can be used to manage special purpose
* memory not managed by the regular kmalloc/kfree interface. The pool will be
* automatically destroyed by the device management code.
*/
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
- int nid)
+ int nid, const char *name)
{
struct gen_pool **ptr, *pool;
+ const char *pool_name = NULL;
+
+ /* Check that genpool to be created is uniquely addressed on device */
+ if (gen_pool_get(dev, name))
+ return ERR_PTR(-EINVAL);
+
+ if (name) {
+ pool_name = kstrdup_const(name, GFP_KERNEL);
+ if (!pool_name)
+ return ERR_PTR(-ENOMEM);
+ }
ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
- return NULL;
+ goto free_pool_name;
pool = gen_pool_create(min_alloc_order, nid);
- if (pool) {
- *ptr = pool;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
+ if (!pool)
+ goto free_devres;
+
+ *ptr = pool;
+ pool->name = pool_name;
+ devres_add(dev, ptr);
return pool;
-}
-EXPORT_SYMBOL(devm_gen_pool_create);
-/**
- * gen_pool_get - Obtain the gen_pool (if any) for a device
- * @dev: device to retrieve the gen_pool from
- *
- * Returns the gen_pool for the device if one is present, or NULL.
- */
-struct gen_pool *gen_pool_get(struct device *dev)
-{
- struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
- NULL);
+free_devres:
+ devres_free(ptr);
+free_pool_name:
+ kfree_const(pool_name);
- if (!p)
- return NULL;
- return *p;
+ return ERR_PTR(-ENOMEM);
}
-EXPORT_SYMBOL_GPL(gen_pool_get);
+EXPORT_SYMBOL(devm_gen_pool_create);
#ifdef CONFIG_OF
/**
@@ -633,16 +669,30 @@ struct gen_pool *of_gen_pool_get(struct device_node *np,
const char *propname, int index)
{
struct platform_device *pdev;
- struct device_node *np_pool;
+ struct device_node *np_pool, *parent;
+ const char *name = NULL;
+ struct gen_pool *pool = NULL;
np_pool = of_parse_phandle(np, propname, index);
if (!np_pool)
return NULL;
+
pdev = of_find_device_by_node(np_pool);
+ if (!pdev) {
+ /* Check if named gen_pool is created by parent node device */
+ parent = of_get_parent(np_pool);
+ pdev = of_find_device_by_node(parent);
+ of_node_put(parent);
+
+ of_property_read_string(np_pool, "label", &name);
+ if (!name)
+ name = np_pool->name;
+ }
+ if (pdev)
+ pool = gen_pool_get(&pdev->dev, name);
of_node_put(np_pool);
- if (!pdev)
- return NULL;
- return gen_pool_get(&pdev->dev);
+
+ return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
#endif /* CONFIG_OF */
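A minimal sketch of the name-aware interface from a driver's point of view; the device, allocation order and pool name are hypothetical:

    struct gen_pool *pool;

    /* Create a managed pool identified by the (hypothetical) name "sram-pool". */
    pool = devm_gen_pool_create(&pdev->dev, ilog2(32), NUMA_NO_NODE, "sram-pool");
    if (IS_ERR(pool))
            return PTR_ERR(pool);

    /* Any later lookup on the same device can address the pool by name;
     * passing NULL matches only a pool created without a name. */
    pool = gen_pool_get(&pdev->dev, "sram-pool");
    if (!pool)
            return -ENODEV;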
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 7ea09699855d..8d74c20d8595 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -11,6 +11,7 @@
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/export.h>
+#include <asm/unaligned.h>
const char hex_asc[] = "0123456789abcdef";
EXPORT_SYMBOL(hex_asc);
@@ -139,7 +140,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
for (j = 0; j < ngroups; j++) {
ret = snprintf(linebuf + lx, linebuflen - lx,
"%s%16.16llx", j ? " " : "",
- (unsigned long long)*(ptr8 + j));
+ get_unaligned(ptr8 + j));
if (ret >= linebuflen - lx)
goto overflow1;
lx += ret;
@@ -150,7 +151,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
for (j = 0; j < ngroups; j++) {
ret = snprintf(linebuf + lx, linebuflen - lx,
"%s%8.8x", j ? " " : "",
- *(ptr4 + j));
+ get_unaligned(ptr4 + j));
if (ret >= linebuflen - lx)
goto overflow1;
lx += ret;
@@ -161,7 +162,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
for (j = 0; j < ngroups; j++) {
ret = snprintf(linebuf + lx, linebuflen - lx,
"%s%4.4x", j ? " " : "",
- *(ptr2 + j));
+ get_unaligned(ptr2 + j));
if (ret >= linebuflen - lx)
goto overflow1;
lx += ret;
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index df30632f0bef..b1c93e94ca7a 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -21,8 +21,7 @@ static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
static inline bool need_flush(struct iommu_map_table *iommu)
{
- return (iommu->lazy_flush != NULL &&
- (iommu->flags & IOMMU_NEED_FLUSH) != 0);
+ return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
}
static inline void set_flush(struct iommu_map_table *iommu)
@@ -119,7 +118,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
unsigned long align_mask = 0;
if (align_order > 0)
- align_mask = 0xffffffffffffffffl >> (64 - align_order);
+ align_mask = ~0ul >> (BITS_PER_LONG - align_order);
/* Sanity check */
if (unlikely(npages == 0)) {
@@ -211,7 +210,8 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
goto bail;
}
}
- if (n < pool->hint || need_flush(iommu)) {
+ if (iommu->lazy_flush &&
+ (n < pool->hint || need_flush(iommu))) {
clear_flush(iommu);
iommu->lazy_flush(iommu);
}
diff --git a/lib/klist.c b/lib/klist.c
index 89b485a2a58d..d74cf7a29afd 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -324,6 +324,47 @@ static struct klist_node *to_klist_node(struct list_head *n)
}
/**
+ * klist_prev - Ante up prev node in list.
+ * @i: Iterator structure.
+ *
+ * First grab list lock. Decrement the reference count of the previous
+ * node, if there was one. Grab the prev node, increment its reference
+ * count, drop the lock, and return that prev node.
+ */
+struct klist_node *klist_prev(struct klist_iter *i)
+{
+ void (*put)(struct klist_node *) = i->i_klist->put;
+ struct klist_node *last = i->i_cur;
+ struct klist_node *prev;
+
+ spin_lock(&i->i_klist->k_lock);
+
+ if (last) {
+ prev = to_klist_node(last->n_node.prev);
+ if (!klist_dec_and_del(last))
+ put = NULL;
+ } else
+ prev = to_klist_node(i->i_klist->k_list.prev);
+
+ i->i_cur = NULL;
+ while (prev != to_klist_node(&i->i_klist->k_list)) {
+ if (likely(!knode_dead(prev))) {
+ kref_get(&prev->n_ref);
+ i->i_cur = prev;
+ break;
+ }
+ prev = to_klist_node(prev->n_node.prev);
+ }
+
+ spin_unlock(&i->i_klist->k_lock);
+
+ if (put && last)
+ put(last);
+ return i->i_cur;
+}
+EXPORT_SYMBOL_GPL(klist_prev);
+
+/**
* klist_next - Ante up next node in list.
* @i: Iterator structure.
*
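A sketch of a reverse walk with the new helper, mirroring the usual klist_next() pattern (the klist 'k' and the per-node work are hypothetical):

    struct klist_iter iter;
    struct klist_node *n;

    klist_iter_init(&k, &iter);          /* i_cur starts out NULL                  */
    while ((n = klist_prev(&iter)))      /* so the walk begins at the tail         */
            handle_node(n);              /* hypothetical per-node work             */
    klist_iter_exit(&iter);              /* standard teardown; drops any held ref  */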
diff --git a/lib/kobject.c b/lib/kobject.c
index 2e3bd01964a9..3e3a5c3cb330 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -337,8 +337,9 @@ error:
}
EXPORT_SYMBOL(kobject_init);
-static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
- const char *fmt, va_list vargs)
+static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
+ struct kobject *parent,
+ const char *fmt, va_list vargs)
{
int retval;
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index ec8da78df9be..94be244e8441 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -152,7 +152,7 @@ int kstrtoll(const char *s, unsigned int base, long long *res)
rv = _kstrtoull(s + 1, base, &tmp);
if (rv < 0)
return rv;
- if ((long long)(-tmp) >= 0)
+ if ((long long)-tmp > 0)
return -ERANGE;
*res = -tmp;
} else {
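The visible effect of the relaxed check, also reflected in the test-kstrtox.c update further down, is that "-0" now parses as zero while real underflows are still rejected:

    long long val;

    kstrtoll("-0", 10, &val);                   /* now returns 0, val == 0 */
    kstrtoll("-9223372036854775809", 10, &val); /* still fails with -ERANGE */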
diff --git a/lib/lockref.c b/lib/lockref.c
index 494994bf17c8..5a92189ad711 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,14 +4,6 @@
#if USE_CMPXCHG_LOCKREF
/*
- * Allow weakly-ordered memory architectures to provide barrier-less
- * cmpxchg semantics for lockref updates.
- */
-#ifndef cmpxchg64_relaxed
-# define cmpxchg64_relaxed cmpxchg64
-#endif
-
-/*
* Note that the "cmpxchg()" reloads the "old" value for the
* failure case.
*/
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index bc0a1da8afba..95c52a95259e 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -146,18 +146,25 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
uint8_t *p;
mpi_limb_t alimb;
unsigned int n = mpi_get_size(a);
- int i;
+ int i, lzeros = 0;
- if (buf_len < n || !buf)
+ if (buf_len < n || !buf || !nbytes)
return -EINVAL;
if (sign)
*sign = a->sign;
- if (nbytes)
- *nbytes = n;
+ p = (void *)&a->d[a->nlimbs] - 1;
+
+ for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
+ if (!*p)
+ lzeros++;
+ else
+ break;
+ }
p = buf;
+ *nbytes = n - lzeros;
for (i = a->nlimbs - 1; i >= 0; i--) {
alimb = a->d[i];
@@ -178,6 +185,19 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
#else
#error please implement for this limb size.
#endif
+
+ if (lzeros > 0) {
+ if (lzeros >= sizeof(alimb)) {
+ p -= sizeof(alimb);
+ } else {
+ mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
+ mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
+ + lzeros;
+ *limb1 = *limb2;
+ p -= lzeros;
+ }
+ lzeros -= sizeof(alimb);
+ }
}
return 0;
}
@@ -197,7 +217,7 @@ EXPORT_SYMBOL_GPL(mpi_read_buffer);
*/
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
{
- uint8_t *buf, *p;
+ uint8_t *buf;
unsigned int n;
int ret;
@@ -220,14 +240,6 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
kfree(buf);
return NULL;
}
-
- /* this is sub-optimal but we need to do the shift operation
- * because the caller has to free the returned buffer */
- for (p = buf; !*p && *nbytes; p++, --*nbytes)
- ;
- if (p != buf)
- memmove(buf, p, *nbytes);
-
return buf;
}
EXPORT_SYMBOL_GPL(mpi_get_buffer);
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
new file mode 100644
index 000000000000..88d3d32e5923
--- /dev/null
+++ b/lib/nmi_backtrace.c
@@ -0,0 +1,162 @@
+/*
+ * NMI backtrace support
+ *
+ * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
+ * with the following header:
+ *
+ * HW NMI watchdog support
+ *
+ * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
+ *
+ * Arch specific calls to support NMI watchdog
+ *
+ * Bits copied from original nmi.c file
+ */
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/kprobes.h>
+#include <linux/nmi.h>
+#include <linux/seq_buf.h>
+
+#ifdef arch_trigger_all_cpu_backtrace
+/* For reliability, we're prepared to waste bits here. */
+static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
+static cpumask_t printtrace_mask;
+
+#define NMI_BUF_SIZE 4096
+
+struct nmi_seq_buf {
+ unsigned char buffer[NMI_BUF_SIZE];
+ struct seq_buf seq;
+};
+
+/* Safe printing in NMI context */
+static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
+{
+ const char *buf = s->buffer + start;
+
+ printk("%.*s", (end - start) + 1, buf);
+}
+
+void nmi_trigger_all_cpu_backtrace(bool include_self,
+ void (*raise)(cpumask_t *mask))
+{
+ struct nmi_seq_buf *s;
+ int i, cpu, this_cpu = get_cpu();
+
+ if (test_and_set_bit(0, &backtrace_flag)) {
+ /*
+ * If there is already a trigger_all_cpu_backtrace() in progress
+ * (backtrace_flag == 1), don't output double cpu dump infos.
+ */
+ put_cpu();
+ return;
+ }
+
+ cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
+ if (!include_self)
+ cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
+
+ cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask));
+
+ /*
+ * Set up per_cpu seq_buf buffers that the NMIs running on the other
+ * CPUs will write to.
+ */
+ for_each_cpu(cpu, to_cpumask(backtrace_mask)) {
+ s = &per_cpu(nmi_print_seq, cpu);
+ seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE);
+ }
+
+ if (!cpumask_empty(to_cpumask(backtrace_mask))) {
+ pr_info("Sending NMI to %s CPUs:\n",
+ (include_self ? "all" : "other"));
+ raise(to_cpumask(backtrace_mask));
+ }
+
+ /* Wait for up to 10 seconds for all CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpumask_empty(to_cpumask(backtrace_mask)))
+ break;
+ mdelay(1);
+ touch_softlockup_watchdog();
+ }
+
+ /*
+ * Now that all the NMIs have triggered, we can dump out their
+ * back traces safely to the console.
+ */
+ for_each_cpu(cpu, &printtrace_mask) {
+ int len, last_i = 0;
+
+ s = &per_cpu(nmi_print_seq, cpu);
+ len = seq_buf_used(&s->seq);
+ if (!len)
+ continue;
+
+ /* Print line by line. */
+ for (i = 0; i < len; i++) {
+ if (s->buffer[i] == '\n') {
+ print_seq_line(s, last_i, i);
+ last_i = i + 1;
+ }
+ }
+ /* Check if there was a partial line. */
+ if (last_i < len) {
+ print_seq_line(s, last_i, len - 1);
+ pr_cont("\n");
+ }
+ }
+
+ clear_bit(0, &backtrace_flag);
+ smp_mb__after_atomic();
+ put_cpu();
+}
+
+/*
+ * It is not safe to call printk() directly from NMI handlers.
+ * It may be fine if the NMI detected a lock up and we have no choice
+ * but to do so, but doing a NMI on all other CPUs to get a back trace
+ * can be done with a sysrq-l. We don't want that to lock up, which
+ * can happen if the NMI interrupts a printk in progress.
+ *
+ * Instead, we redirect the vprintk() to this nmi_vprintk() that writes
+ * the content into a per cpu seq_buf buffer. Then when the NMIs are
+ * all done, we can safely dump the contents of the seq_buf to a printk()
+ * from a non NMI context.
+ */
+static int nmi_vprintk(const char *fmt, va_list args)
+{
+ struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
+ unsigned int len = seq_buf_used(&s->seq);
+
+ seq_buf_vprintf(&s->seq, fmt, args);
+ return seq_buf_used(&s->seq) - len;
+}
+
+bool nmi_cpu_backtrace(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+
+ if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+ printk_func_t printk_func_save = this_cpu_read(printk_func);
+
+ /* Replace printk to write into the NMI seq */
+ this_cpu_write(printk_func, nmi_vprintk);
+ pr_warn("NMI backtrace for cpu %d\n", cpu);
+ show_regs(regs);
+ this_cpu_write(printk_func, printk_func_save);
+
+ cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
+ return true;
+ }
+
+ return false;
+}
+NOKPROBE_SYMBOL(nmi_cpu_backtrace);
+#endif
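A sketch of how an architecture is expected to wire this up: arch_trigger_all_cpu_backtrace() hands an IPI/NMI-raising callback to nmi_trigger_all_cpu_backtrace(), and the NMI handler calls nmi_cpu_backtrace(). The arch_send_backtrace_ipi() helper below is hypothetical:

    static void nmi_raise_cpu_backtrace(cpumask_t *mask)
    {
            arch_send_backtrace_ipi(mask);  /* hypothetical: raise NMI/IPI on @mask */
    }

    void arch_trigger_all_cpu_backtrace(bool include_self)
    {
            nmi_trigger_all_cpu_backtrace(include_self, nmi_raise_cpu_backtrace);
    }

    /* The arch's NMI/IPI handler then calls nmi_cpu_backtrace(regs); a true
     * return means this CPU's trace was captured into its per-cpu seq_buf. */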
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
index bcce5f149310..c10fba461454 100644
--- a/lib/pci_iomap.c
+++ b/lib/pci_iomap.c
@@ -41,17 +41,59 @@ void __iomem *pci_iomap_range(struct pci_dev *dev,
len = maxlen;
if (flags & IORESOURCE_IO)
return __pci_ioport_map(dev, start, len);
- if (flags & IORESOURCE_MEM) {
- if (flags & IORESOURCE_CACHEABLE)
- return ioremap(start, len);
- return ioremap_nocache(start, len);
- }
+ if (flags & IORESOURCE_MEM)
+ return ioremap(start, len);
/* What? */
return NULL;
}
EXPORT_SYMBOL(pci_iomap_range);
/**
+ * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @offset: map memory at the given offset in BAR
+ * @maxlen: max length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will
+ * just do what you expect from them in the correct way. When possible,
+ * write combining is used.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR from offset to the end, pass %0 here.
+ * */
+void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
+ int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ resource_size_t start = pci_resource_start(dev, bar);
+ resource_size_t len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+
+ if (flags & IORESOURCE_IO)
+ return NULL;
+
+ if (len <= offset || !start)
+ return NULL;
+
+ len -= offset;
+ start += offset;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+
+ if (flags & IORESOURCE_MEM)
+ return ioremap_wc(start, len);
+
+ /* What? */
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
+
+/**
* pci_iomap - create a virtual mapping cookie for a PCI BAR
* @dev: PCI device that owns the BAR
* @bar: BAR number
@@ -70,4 +112,25 @@ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);
+
+/**
+ * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will
+ * just do what you expect from them in the correct way. When possible,
+ * write combining is used.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR without checking for its length first, pass %0 here.
+ * */
+void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_wc_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc);
#endif /* CONFIG_PCI */
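A minimal sketch of a driver mapping a prefetchable BAR with the new helper; the BAR number is hypothetical, and whether the mapping is really write-combined depends on the architecture's ioremap_wc():

    void __iomem *fb;

    fb = pci_iomap_wc(pdev, 2, 0);  /* whole BAR 2, write-combined if possible */
    if (!fb)
            return -ENOMEM;

    /* ... ioread32()/iowrite32() on fb as usual ... */

    pci_iounmap(pdev, fb);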
diff --git a/lib/raid6/neon.c b/lib/raid6/neon.c
index d9ad6ee284f4..7076ef1ba3dd 100644
--- a/lib/raid6/neon.c
+++ b/lib/raid6/neon.c
@@ -40,9 +40,20 @@
(unsigned long)bytes, ptrs); \
kernel_neon_end(); \
} \
+ static void raid6_neon ## _n ## _xor_syndrome(int disks, \
+ int start, int stop, \
+ size_t bytes, void **ptrs) \
+ { \
+ void raid6_neon ## _n ## _xor_syndrome_real(int, \
+ int, int, unsigned long, void**); \
+ kernel_neon_begin(); \
+ raid6_neon ## _n ## _xor_syndrome_real(disks, \
+ start, stop, (unsigned long)bytes, ptrs); \
+ kernel_neon_end(); \
+ } \
struct raid6_calls const raid6_neonx ## _n = { \
raid6_neon ## _n ## _gen_syndrome, \
- NULL, /* XOR not yet implemented */ \
+ raid6_neon ## _n ## _xor_syndrome, \
raid6_have_neon, \
"neonx" #_n, \
0 \
diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc
index 1b9ed793342d..4fa51b761dd0 100644
--- a/lib/raid6/neon.uc
+++ b/lib/raid6/neon.uc
@@ -3,6 +3,7 @@
* neon.uc - RAID-6 syndrome calculation using ARM NEON instructions
*
* Copyright (C) 2012 Rob Herring
+ * Copyright (C) 2015 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* Based on altivec.uc:
* Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
@@ -78,3 +79,48 @@ void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
vst1q_u8(&q[d+NSIZE*$$], wq$$);
}
}
+
+void raid6_neon$#_xor_syndrome_real(int disks, int start, int stop,
+ unsigned long bytes, void **ptrs)
+{
+ uint8_t **dptr = (uint8_t **)ptrs;
+ uint8_t *p, *q;
+ int d, z, z0;
+
+ register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
+ const unative_t x1d = NBYTES(0x1d);
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+ wq$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
+ wp$$ = veorq_u8(vld1q_u8(&p[d+$$*NSIZE]), wq$$);
+
+ /* P/Q data pages */
+ for ( z = z0-1 ; z >= start ; z-- ) {
+ wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]);
+ wp$$ = veorq_u8(wp$$, wd$$);
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+
+ w2$$ = vandq_u8(w2$$, x1d);
+ w1$$ = veorq_u8(w1$$, w2$$);
+ wq$$ = veorq_u8(w1$$, wd$$);
+ }
+ /* P/Q left side optimization */
+ for ( z = start-1 ; z >= 0 ; z-- ) {
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+
+ w2$$ = vandq_u8(w2$$, x1d);
+ wq$$ = veorq_u8(w1$$, w2$$);
+ }
+ w1$$ = vld1q_u8(&q[d+NSIZE*$$]);
+ wq$$ = veorq_u8(wq$$, w1$$);
+
+ vst1q_u8(&p[d+NSIZE*$$], wp$$);
+ vst1q_u8(&q[d+NSIZE*$$], wq$$);
+ }
+}
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a60a6d335a91..a54ff8949f91 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -187,10 +187,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
head = rht_dereference_bucket(new_tbl->buckets[new_hash],
new_tbl, new_hash);
- if (rht_is_a_nulls(head))
- INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
- else
- RCU_INIT_POINTER(entry->next, head);
+ RCU_INIT_POINTER(entry->next, head);
rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
spin_unlock(new_bucket_lock);
@@ -610,6 +607,8 @@ next:
iter->skip = 0;
}
+ iter->p = NULL;
+
/* Ensure we see any new tables. */
smp_rmb();
@@ -620,8 +619,6 @@ next:
return ERR_PTR(-EAGAIN);
}
- iter->p = NULL;
-
return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index d105a9f56878..bafa9933fa76 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -105,16 +105,12 @@ EXPORT_SYMBOL(sg_nents_for_len);
**/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
- struct scatterlist *ret = &sgl[nents - 1];
-#else
struct scatterlist *sg, *ret = NULL;
unsigned int i;
for_each_sg(sgl, sg, nents, i)
ret = sg;
-#endif
#ifdef CONFIG_DEBUG_SG
BUG_ON(sgl[0].sg_magic != SG_MAGIC);
BUG_ON(!sg_is_last(ret));
diff --git a/lib/sg_split.c b/lib/sg_split.c
new file mode 100644
index 000000000000..b063410c3593
--- /dev/null
+++ b/lib/sg_split.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * Scatterlist splitting helpers.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+struct sg_splitter {
+ struct scatterlist *in_sg0;
+ int nents;
+ off_t skip_sg0;
+ unsigned int length_last_sg;
+
+ struct scatterlist *out_sg;
+};
+
+static int sg_calculate_split(struct scatterlist *in, int nents, int nb_splits,
+ off_t skip, const size_t *sizes,
+ struct sg_splitter *splitters, bool mapped)
+{
+ int i;
+ unsigned int sglen;
+ size_t size = sizes[0], len;
+ struct sg_splitter *curr = splitters;
+ struct scatterlist *sg;
+
+ for (i = 0; i < nb_splits; i++) {
+ splitters[i].in_sg0 = NULL;
+ splitters[i].nents = 0;
+ }
+
+ for_each_sg(in, sg, nents, i) {
+ sglen = mapped ? sg_dma_len(sg) : sg->length;
+ if (skip > sglen) {
+ skip -= sglen;
+ continue;
+ }
+
+ len = min_t(size_t, size, sglen - skip);
+ if (!curr->in_sg0) {
+ curr->in_sg0 = sg;
+ curr->skip_sg0 = skip;
+ }
+ size -= len;
+ curr->nents++;
+ curr->length_last_sg = len;
+
+ while (!size && (skip + len < sglen) && (--nb_splits > 0)) {
+ curr++;
+ size = *(++sizes);
+ skip += len;
+ len = min_t(size_t, size, sglen - skip);
+
+ curr->in_sg0 = sg;
+ curr->skip_sg0 = skip;
+ curr->nents = 1;
+ curr->length_last_sg = len;
+ size -= len;
+ }
+ skip = 0;
+
+ if (!size && --nb_splits > 0) {
+ curr++;
+ size = *(++sizes);
+ }
+
+ if (!nb_splits)
+ break;
+ }
+
+ return (size || !splitters[0].in_sg0) ? -EINVAL : 0;
+}
+
+static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
+{
+ int i, j;
+ struct scatterlist *in_sg, *out_sg;
+ struct sg_splitter *split;
+
+ for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+ in_sg = split->in_sg0;
+ out_sg = split->out_sg;
+ for (j = 0; j < split->nents; j++, out_sg++) {
+ *out_sg = *in_sg;
+ if (!j) {
+ out_sg->offset += split->skip_sg0;
+ out_sg->length -= split->skip_sg0;
+ } else {
+ out_sg->offset = 0;
+ }
+ sg_dma_address(out_sg) = 0;
+ sg_dma_len(out_sg) = 0;
+ in_sg = sg_next(in_sg);
+ }
+ out_sg[-1].length = split->length_last_sg;
+ sg_mark_end(out_sg - 1);
+ }
+}
+
+static void sg_split_mapped(struct sg_splitter *splitters, const int nb_splits)
+{
+ int i, j;
+ struct scatterlist *in_sg, *out_sg;
+ struct sg_splitter *split;
+
+ for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+ in_sg = split->in_sg0;
+ out_sg = split->out_sg;
+ for (j = 0; j < split->nents; j++, out_sg++) {
+ sg_dma_address(out_sg) = sg_dma_address(in_sg);
+ sg_dma_len(out_sg) = sg_dma_len(in_sg);
+ if (!j) {
+ sg_dma_address(out_sg) += split->skip_sg0;
+ sg_dma_len(out_sg) -= split->skip_sg0;
+ }
+ in_sg = sg_next(in_sg);
+ }
+ sg_dma_len(--out_sg) = split->length_last_sg;
+ }
+}
+
+/**
+ * sg_split - split a scatterlist into several scatterlists
+ * @in: the input sg list
+ * @in_mapped_nents: the result of a dma_map_sg(in, ...), or 0 if not mapped.
+ * @skip: the number of bytes to skip in the input sg list
+ * @nb_splits: the number of desired sg outputs
+ * @split_sizes: the respective size of each output sg list in bytes
+ * @out: an array in which to store the allocated output sg lists
+ * @out_mapped_nents: the number of mapped sg entries of each resulting sg
+ * list. May be NULL if the sg list is not already mapped (in_mapped_nents = 0)
+ * @gfp_mask: the allocation flag
+ *
+ * This function splits the input sg list into nb_splits sg lists, which are
+ * allocated and stored into out.
+ * The @in is split into :
+ * - @out[0], which covers bytes [@skip .. @skip + @split_sizes[0] - 1] of @in
+ * - @out[1], which covers bytes [@skip + split_sizes[0] ..
+ * @skip + @split_sizes[0] + @split_sizes[1] -1]
+ * etc ...
+ * It will be the caller's duty to kfree() out array members.
+ *
+ * Returns 0 upon success, or error code
+ */
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+ const off_t skip, const int nb_splits,
+ const size_t *split_sizes,
+ struct scatterlist **out, int *out_mapped_nents,
+ gfp_t gfp_mask)
+{
+ int i, ret;
+ struct sg_splitter *splitters;
+
+ splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
+ if (!splitters)
+ return -ENOMEM;
+
+ ret = sg_calculate_split(in, sg_nents(in), nb_splits, skip, split_sizes,
+ splitters, false);
+ if (ret < 0)
+ goto err;
+
+ ret = -ENOMEM;
+ for (i = 0; i < nb_splits; i++) {
+ splitters[i].out_sg = kmalloc_array(splitters[i].nents,
+ sizeof(struct scatterlist),
+ gfp_mask);
+ if (!splitters[i].out_sg)
+ goto err;
+ }
+
+ /*
+ * The order of these 3 calls is important and should be kept.
+ */
+ sg_split_phys(splitters, nb_splits);
+ ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
+ split_sizes, splitters, true);
+ if (ret < 0)
+ goto err;
+ sg_split_mapped(splitters, nb_splits);
+
+ for (i = 0; i < nb_splits; i++) {
+ out[i] = splitters[i].out_sg;
+ if (out_mapped_nents)
+ out_mapped_nents[i] = splitters[i].nents;
+ }
+
+ kfree(splitters);
+ return 0;
+
+err:
+ for (i = 0; i < nb_splits; i++)
+ kfree(splitters[i].out_sg);
+ kfree(splitters);
+ return ret;
+}
+EXPORT_SYMBOL(sg_split);
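A sketch of splitting one DMA-mapped scatterlist between two DMA channels with the new helper; the sizes and the sgl, mapped_nents and total_len variables are hypothetical:

    size_t sizes[2] = { SZ_4K, total_len - SZ_4K };
    struct scatterlist *parts[2];
    int part_nents[2];
    int ret;

    ret = sg_split(sgl, mapped_nents, 0, 2, sizes, parts, part_nents, GFP_KERNEL);
    if (ret)
            return ret;

    /* parts[0] and parts[1] are freshly allocated sg lists, each usable on
     * its own DMA channel; part_nents[] holds their mapped entry counts.
     * The caller kfree()s them once the transfers are done. */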
diff --git a/lib/show_mem.c b/lib/show_mem.c
index adc98e1825ba..1feed6a2b12a 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -38,11 +38,9 @@ void show_mem(unsigned int filter)
printk("%lu pages RAM\n", total);
printk("%lu pages HighMem/MovableOnly\n", highmem);
+ printk("%lu pages reserved\n", reserved);
#ifdef CONFIG_CMA
- printk("%lu pages reserved\n", (reserved - totalcma_pages));
printk("%lu pages cma reserved\n", totalcma_pages);
-#else
- printk("%lu pages reserved\n", reserved);
#endif
#ifdef CONFIG_QUICKLIST
printk("%lu pages in pagetable cache\n",
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index c98ae818eb4e..5939f63d90cd 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -59,7 +59,11 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
}
exp = divisor[units] / (u32)blk_size;
- if (size >= exp) {
+ /*
+ * size must be strictly greater than exp here to ensure that remainder
+ * is greater than divisor[units] coming out of the if below.
+ */
+ if (size > exp) {
remainder = do_div(size, divisor[units]);
remainder *= blk_size;
i++;
@@ -410,7 +414,7 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* @dst: destination buffer (escaped)
* @osz: destination buffer size
* @flags: combination of the flags (bitwise OR):
- * %ESCAPE_SPACE:
+ * %ESCAPE_SPACE: (special white space, not space itself)
* '\f' - form feed
* '\n' - new line
* '\r' - carriage return
@@ -432,16 +436,18 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* all previous together
* %ESCAPE_HEX:
* '\xHH' - byte with hexadecimal value HH (2 digits)
- * @esc: NULL-terminated string of characters any of which, if found in
- * the source, has to be escaped
+ * @only: NULL-terminated string containing characters used to limit
+ * the selected escape class. If characters are included in @only
+ * that would not normally be escaped by the classes selected
+ * in @flags, they will be copied to @dst unescaped.
*
* Description:
* The process of escaping byte buffer includes several parts. They are applied
* in the following sequence.
* 1. The character is matched to the printable class, if asked, and in
* case of match it passes through to the output.
- * 2. The character is not matched to the one from @esc string and thus
- * must go as is to the output.
+ * 2. The character is not matched to the one from @only string and thus
+ * must go as-is to the output.
* 3. The character is checked if it falls into the class given by @flags.
* %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any
* character. Note that they actually can't go together, otherwise
@@ -458,11 +464,11 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* dst for a '\0' terminator if and only if ret < osz.
*/
int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
- unsigned int flags, const char *esc)
+ unsigned int flags, const char *only)
{
char *p = dst;
char *end = p + osz;
- bool is_dict = esc && *esc;
+ bool is_dict = only && *only;
while (isz--) {
unsigned char c = *src++;
@@ -471,7 +477,7 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
* Apply rules in the following sequence:
* - the character is printable, when @flags has
* %ESCAPE_NP bit set
- * - the @esc string is supplied and does not contain a
+ * - the @only string is supplied and does not contain a
* character under question
* - the character doesn't fall into a class of symbols
* defined by given @flags
@@ -479,7 +485,7 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
* output buffer.
*/
if ((flags & ESCAPE_NP && isprint(c)) ||
- (is_dict && !strchr(esc, c))) {
+ (is_dict && !strchr(only, c))) {
/* do nothing */
} else {
if (flags & ESCAPE_SPACE && escape_space(c, &p, end))
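A short sketch of the renamed @only argument in action (buffers invented for the example): only characters listed in @only are eligible for the classes selected in @flags; everything else is copied through.

    char out[64];
    int ret;

    ret = string_escape_mem("a\tb\nc\r", 6, out, sizeof(out),
                            ESCAPE_SPACE, "\n\t");
    /* '\t' and '\n' become the two-character sequences "\t" and "\n";
     * '\r', although in the ESCAPE_SPACE class, is not in @only and is
     * copied through unescaped.  ret is the escaped length. */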
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c
index 4137bca5f8e8..f355f67169b6 100644
--- a/lib/test-kstrtox.c
+++ b/lib/test-kstrtox.c
@@ -260,6 +260,7 @@ static void __init test_kstrtoll_ok(void)
{"4294967297", 10, 4294967297LL},
{"9223372036854775807", 10, 9223372036854775807LL},
+ {"-0", 10, 0LL},
{"-1", 10, -1LL},
{"-2", 10, -2LL},
{"-9223372036854775808", 10, LLONG_MIN},
@@ -277,11 +278,6 @@ static void __init test_kstrtoll_fail(void)
{"-9223372036854775809", 10},
{"-18446744073709551614", 10},
{"-18446744073709551615", 10},
- /* negative zero isn't an integer in Linux */
- {"-0", 0},
- {"-0", 8},
- {"-0", 10},
- {"-0", 16},
/* sign is first character if any */
{"-+1", 0},
{"-+1", 8},
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 7f58c735d745..d1377390b3ad 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -18,10 +18,12 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/filter.h>
+#include <linux/bpf.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
+#include <linux/highmem.h>
/* General test specific settings */
#define MAX_SUBTESTS 3
@@ -55,6 +57,7 @@
/* Flags that can be passed to test cases */
#define FLAG_NO_DATA BIT(0)
#define FLAG_EXPECTED_FAIL BIT(1)
+#define FLAG_SKB_FRAG BIT(2)
enum {
CLASSIC = BIT(6), /* Old BPF instructions only. */
@@ -80,6 +83,7 @@ struct bpf_test {
__u32 result;
} test[MAX_SUBTESTS];
int (*fill_helper)(struct bpf_test *self);
+ __u8 frag_data[MAX_DATA];
};
/* Large test cases need separate allocation and fill handler. */
@@ -355,6 +359,81 @@ static int bpf_fill_ja(struct bpf_test *self)
return __bpf_fill_ja(self, 12, 9);
}
+static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < len - 1; i += 2) {
+ insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
+ insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_CPU);
+ }
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+#define PUSH_CNT 68
+/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
+static int bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct bpf_insn *insn;
+ int i = 0, j, k = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[i++] = BPF_MOV64_REG(R6, R1);
+loop:
+ for (j = 0; j < PUSH_CNT; j++) {
+ insn[i++] = BPF_LD_ABS(BPF_B, 0);
+ insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
+ i++;
+ insn[i++] = BPF_MOV64_REG(R1, R6);
+ insn[i++] = BPF_MOV64_IMM(R2, 1);
+ insn[i++] = BPF_MOV64_IMM(R3, 2);
+ insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ bpf_skb_vlan_push_proto.func - __bpf_call_base);
+ insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
+ i++;
+ }
+
+ for (j = 0; j < PUSH_CNT; j++) {
+ insn[i++] = BPF_LD_ABS(BPF_B, 0);
+ insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
+ i++;
+ insn[i++] = BPF_MOV64_REG(R1, R6);
+ insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ bpf_skb_vlan_pop_proto.func - __bpf_call_base);
+ insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
+ i++;
+ }
+ if (++k < 5)
+ goto loop;
+
+ for (; i < len - 1; i++)
+ insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xbef);
+
+ insn[len - 1] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
static struct bpf_test tests[] = {
{
"TAX",
@@ -3674,6 +3753,9 @@ static struct bpf_test tests[] = {
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
BPF_EXIT_INSN(),
},
INTERNAL,
@@ -3708,6 +3790,9 @@ static struct bpf_test tests[] = {
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
BPF_EXIT_INSN(),
},
INTERNAL,
@@ -4392,6 +4477,618 @@ static struct bpf_test tests[] = {
{ { 0, 0xababcbac } },
.fill_helper = bpf_fill_maxinsns11,
},
+ {
+ "BPF_MAXINSNS: ld_abs+get_processor_id",
+ { },
+ CLASSIC,
+ { },
+ { { 1, 0xbee } },
+ .fill_helper = bpf_fill_ld_abs_get_processor_id,
+ },
+ {
+ "BPF_MAXINSNS: ld_abs+vlan_push/pop",
+ { },
+ INTERNAL,
+ { 0x34 },
+ { { 1, 0xbef } },
+ .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+ },
+ /*
+ * LD_IND / LD_ABS on fragmented SKBs
+ */
+ {
+ "LD_IND byte frag",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x42} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_IND halfword frag",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x4),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x4344} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_IND word frag",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x8),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x21071983} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_IND halfword mixed head/frag",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { [0x3e] = 0x25, [0x3f] = 0x05, },
+ { {0x40, 0x0519} },
+ .frag_data = { 0x19, 0x82 },
+ },
+ {
+ "LD_IND word mixed head/frag",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { [0x3e] = 0x25, [0x3f] = 0x05, },
+ { {0x40, 0x25051982} },
+ .frag_data = { 0x19, 0x82 },
+ },
+ {
+ "LD_ABS byte frag",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x40),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x42} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_ABS halfword frag",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x44),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x4344} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_ABS word frag",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x48),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x21071983} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_ABS halfword mixed head/frag",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { [0x3e] = 0x25, [0x3f] = 0x05, },
+ { {0x40, 0x0519} },
+ .frag_data = { 0x19, 0x82 },
+ },
+ {
+ "LD_ABS word mixed head/frag",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3e),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { [0x3e] = 0x25, [0x3f] = 0x05, },
+ { {0x40, 0x25051982} },
+ .frag_data = { 0x19, 0x82 },
+ },
+ /*
+ * LD_IND / LD_ABS on non fragmented SKBs
+ */
+ {
+ /*
+ * this tests that the JIT/interpreter correctly resets X
+ * before using it in an LD_IND instruction.
+ */
+ "LD_IND byte default X",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x1] = 0x42 },
+ { {0x40, 0x42 } },
+ },
+ {
+ "LD_IND byte positive offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x82 } },
+ },
+ {
+ "LD_IND byte negative offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x05 } },
+ },
+ {
+ "LD_IND halfword positive offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x2),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ },
+ { {0x40, 0xdd88 } },
+ },
+ {
+ "LD_IND halfword negative offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x2),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ },
+ { {0x40, 0xbb66 } },
+ },
+ {
+ "LD_IND halfword unaligned",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ },
+ { {0x40, 0x66cc } },
+ },
+ {
+ "LD_IND word positive offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x4),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xee99ffaa } },
+ },
+ {
+ "LD_IND word negative offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x4),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xaa55bb66 } },
+ },
+ {
+ "LD_IND word unaligned (addr & 3 == 2)",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xbb66cc77 } },
+ },
+ {
+ "LD_IND word unaligned (addr & 3 == 1)",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0x55bb66cc } },
+ },
+ {
+ "LD_IND word unaligned (addr & 3 == 3)",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0x66cc77dd } },
+ },
+ {
+ "LD_ABS byte",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xcc } },
+ },
+ {
+ "LD_ABS halfword",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xdd88 } },
+ },
+ {
+ "LD_ABS halfword unaligned",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x25),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0x99ff } },
+ },
+ {
+ "LD_ABS word",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xaa55bb66 } },
+ },
+ {
+ "LD_ABS word unaligned (addr & 3 == 2)",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x22),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xdd88ee99 } },
+ },
+ {
+ "LD_ABS word unaligned (addr & 3 == 1)",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x21),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0x77dd88ee } },
+ },
+ {
+ "LD_ABS word unaligned (addr & 3 == 3)",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x23),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0x88ee99ff } },
+ },
+ /*
+ * verify that the interpreter or JIT correctly sets A and X
+ * to 0.
+ */
+ {
+ "ADD default X",
+ .u.insns = {
+ /*
+ * A = 0x42
+ * A = A + X
+ * ret A
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x42 } },
+ },
+ {
+ "ADD default A",
+ .u.insns = {
+ /*
+ * A = A + 0x42
+ * ret A
+ */
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0x42),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x42 } },
+ },
+ {
+ "SUB default X",
+ .u.insns = {
+ /*
+ * A = 0x66
+ * A = A - X
+ * ret A
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x66),
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x66 } },
+ },
+ {
+ "SUB default A",
+ .u.insns = {
+ /*
+ * A = A - -0x66
+ * ret A
+ */
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, -0x66),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x66 } },
+ },
+ {
+ "MUL default X",
+ .u.insns = {
+ /*
+ * A = 0x42
+ * A = A * X
+ * ret A
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+ BPF_STMT(BPF_ALU | BPF_MUL | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x0 } },
+ },
+ {
+ "MUL default A",
+ .u.insns = {
+ /*
+ * A = A * 0x66
+ * ret A
+ */
+ BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 0x66),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x0 } },
+ },
+ {
+ "DIV default X",
+ .u.insns = {
+ /*
+ * A = 0x42
+			 * A = A / X ; this halts the filter execution if X is 0
+ * ret 0x42
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0x42),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x0 } },
+ },
+ {
+ "DIV default A",
+ .u.insns = {
+ /*
+ * A = A / 1
+ * ret A
+ */
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x0 } },
+ },
+ {
+ "JMP EQ default A",
+ .u.insns = {
+ /*
+ * cmp A, 0x0, 0, 1
+ * ret 0x42
+ * ret 0x66
+ */
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 0x42),
+ BPF_STMT(BPF_RET | BPF_K, 0x66),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x42 } },
+ },
+ {
+ "JMP EQ default X",
+ .u.insns = {
+ /*
+ * A = 0x0
+ * cmp A, X, 0, 1
+ * ret 0x42
+ * ret 0x66
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0x0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 0x42),
+ BPF_STMT(BPF_RET | BPF_K, 0x66),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x42 } },
+ },
};
static struct net_device dev;
@@ -4427,6 +5124,9 @@ static struct sk_buff *populate_skb(char *buf, int size)
static void *generate_test_data(struct bpf_test *test, int sub)
{
+ struct sk_buff *skb;
+ struct page *page;
+
if (test->aux & FLAG_NO_DATA)
return NULL;
@@ -4434,7 +5134,38 @@ static void *generate_test_data(struct bpf_test *test, int sub)
* subtests generate skbs of different sizes based on
* the same data.
*/
- return populate_skb(test->data, test->test[sub].data_size);
+ skb = populate_skb(test->data, test->test[sub].data_size);
+ if (!skb)
+ return NULL;
+
+ if (test->aux & FLAG_SKB_FRAG) {
+ /*
+ * when the test requires a fragmented skb, add a
+ * single fragment to the skb, filled with
+ * test->frag_data.
+ */
+ void *ptr;
+
+ page = alloc_page(GFP_KERNEL);
+
+ if (!page)
+ goto err_kfree_skb;
+
+ ptr = kmap(page);
+ if (!ptr)
+ goto err_free_page;
+ memcpy(ptr, test->frag_data, MAX_DATA);
+ kunmap(page);
+ skb_add_rx_frag(skb, 0, page, 0, MAX_DATA, MAX_DATA);
+ }
+
+ return skb;
+
+err_free_page:
+ __free_page(page);
+err_kfree_skb:
+ kfree_skb(skb);
+ return NULL;
}
static void release_test_data(const struct bpf_test *test, void *data)
@@ -4515,6 +5246,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
}
fp->len = flen;
+ /* Type doesn't really matter here as long as it's not unspec. */
+ fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
bpf_prog_select_runtime(fp);
@@ -4545,14 +5278,14 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
u64 start, finish;
int ret = 0, i;
- start = ktime_to_us(ktime_get());
+ start = ktime_get_ns();
for (i = 0; i < runs; i++)
ret = BPF_PROG_RUN(fp, data);
- finish = ktime_to_us(ktime_get());
+ finish = ktime_get_ns();
- *duration = (finish - start) * 1000ULL;
+ *duration = finish - start;
do_div(*duration, runs);
return ret;
@@ -4572,6 +5305,11 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
break;
data = generate_test_data(test, i);
+ if (!data && !(test->aux & FLAG_NO_DATA)) {
+ pr_cont("data generation failed ");
+ err_cnt++;
+ break;
+ }
ret = __run_one(fp, data, runs, &duration);
release_test_data(test, data);
@@ -4587,10 +5325,73 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
return err_cnt;
}
+static char test_name[64];
+module_param_string(test_name, test_name, sizeof(test_name), 0);
+
+static int test_id = -1;
+module_param(test_id, int, 0);
+
+static int test_range[2] = { 0, ARRAY_SIZE(tests) - 1 };
+module_param_array(test_range, int, NULL, 0);
+
+static __init int find_test_index(const char *test_name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (!strcmp(tests[i].descr, test_name))
+ return i;
+ }
+ return -1;
+}
+
static __init int prepare_bpf_tests(void)
{
int i;
+ if (test_id >= 0) {
+ /*
+ * if a test_id was specified, use test_range to
+ * cover only that test.
+ */
+ if (test_id >= ARRAY_SIZE(tests)) {
+ pr_err("test_bpf: invalid test_id specified.\n");
+ return -EINVAL;
+ }
+
+ test_range[0] = test_id;
+ test_range[1] = test_id;
+ } else if (*test_name) {
+ /*
+ * if a test_name was specified, find it and setup
+ * test_range to cover only that test.
+ */
+ int idx = find_test_index(test_name);
+
+ if (idx < 0) {
+ pr_err("test_bpf: no test named '%s' found.\n",
+ test_name);
+ return -EINVAL;
+ }
+ test_range[0] = idx;
+ test_range[1] = idx;
+ } else {
+ /*
+ * check that the supplied test_range is valid.
+ */
+ if (test_range[0] >= ARRAY_SIZE(tests) ||
+ test_range[1] >= ARRAY_SIZE(tests) ||
+ test_range[0] < 0 || test_range[1] < 0) {
+			pr_err("test_bpf: test_range is out of bounds.\n");
+ return -EINVAL;
+ }
+
+ if (test_range[1] < test_range[0]) {
+			pr_err("test_bpf: test_range ends before it starts.\n");
+ return -EINVAL;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(tests); i++) {
if (tests[i].fill_helper &&
tests[i].fill_helper(&tests[i]) < 0)
@@ -4610,6 +5411,11 @@ static __init void destroy_bpf_tests(void)
}
}
+static bool exclude_test(int test_id)
+{
+ return test_id < test_range[0] || test_id > test_range[1];
+}
+
static __init int test_bpf(void)
{
int i, err_cnt = 0, pass_cnt = 0;
@@ -4619,6 +5425,9 @@ static __init int test_bpf(void)
struct bpf_prog *fp;
int err;
+ if (exclude_test(i))
+ continue;
+
pr_info("#%d %s ", i, tests[i].descr);
fp = generate_filter(i, &err);
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 098c08eddfab..c1efb1b61017 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -65,7 +65,7 @@ static noinline void __init kmalloc_node_oob_right(void)
kfree(ptr);
}
-static noinline void __init kmalloc_large_oob_rigth(void)
+static noinline void __init kmalloc_large_oob_right(void)
{
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
@@ -114,7 +114,7 @@ static noinline void __init kmalloc_oob_krealloc_less(void)
kfree(ptr1);
return;
}
- ptr2[size1] = 'x';
+ ptr2[size2] = 'x';
kfree(ptr2);
}
@@ -259,7 +259,7 @@ static int __init kmalloc_tests_init(void)
kmalloc_oob_right();
kmalloc_oob_left();
kmalloc_node_oob_right();
- kmalloc_large_oob_rigth();
+ kmalloc_large_oob_right();
kmalloc_oob_krealloc_more();
kmalloc_oob_krealloc_less();
kmalloc_oob_16();
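
The index fix above matters because kmalloc_oob_krealloc_less() shrinks the
allocation, so the first out-of-bounds byte sits at the new size, not the old
one. A sketch of the access pattern being exercised (sizes are illustrative,
not the ones used by the test):

	#include <linux/slab.h>

	static void krealloc_less_demo(void)
	{
		char *p, *q;

		p = kmalloc(34, GFP_KERNEL);
		if (!p)
			return;
		q = krealloc(p, 19, GFP_KERNEL);	/* usable size is now 19 */
		if (!q) {
			kfree(p);
			return;
		}
		q[19] = 'x';	/* one byte past the new end: KASAN should report it */
		kfree(q);
	}
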
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index c90777eae1f8..8c1ad1ced72c 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -16,10 +16,14 @@
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
+#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
+#include <linux/semaphore.h>
#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
#define MAX_ENTRIES 1000000
#define TEST_INSERT_FAIL INT_MAX
@@ -44,11 +48,21 @@ static int size = 8;
module_param(size, int, 0);
MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");
+static int tcount = 10;
+module_param(tcount, int, 0);
+MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)");
+
struct test_obj {
int value;
struct rhash_head node;
};
+struct thread_data {
+ int id;
+ struct task_struct *task;
+ struct test_obj *objs;
+};
+
static struct test_obj array[MAX_ENTRIES];
static struct rhashtable_params test_rht_params = {
@@ -59,6 +73,9 @@ static struct rhashtable_params test_rht_params = {
.nulls_base = (3U << RHT_BASE_SHIFT),
};
+static struct semaphore prestart_sem;
+static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
+
static int __init test_rht_lookup(struct rhashtable *ht)
{
unsigned int i;
@@ -87,6 +104,8 @@ static int __init test_rht_lookup(struct rhashtable *ht)
return -EINVAL;
}
}
+
+ cond_resched_rcu();
}
return 0;
@@ -160,6 +179,8 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
} else if (err) {
return err;
}
+
+ cond_resched();
}
if (insert_fails)
@@ -183,6 +204,8 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
rhashtable_remove_fast(ht, &obj->node, test_rht_params);
}
+
+ cond_resched();
}
end = ktime_get_ns();
@@ -193,10 +216,97 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
static struct rhashtable ht;
+static int thread_lookup_test(struct thread_data *tdata)
+{
+ int i, err = 0;
+
+ for (i = 0; i < entries; i++) {
+ struct test_obj *obj;
+ int key = (tdata->id << 16) | i;
+
+ obj = rhashtable_lookup_fast(&ht, &key, test_rht_params);
+ if (obj && (tdata->objs[i].value == TEST_INSERT_FAIL)) {
+ pr_err(" found unexpected object %d\n", key);
+ err++;
+ } else if (!obj && (tdata->objs[i].value != TEST_INSERT_FAIL)) {
+ pr_err(" object %d not found!\n", key);
+ err++;
+ } else if (obj && (obj->value != key)) {
+ pr_err(" wrong object returned (got %d, expected %d)\n",
+ obj->value, key);
+ err++;
+ }
+ }
+ return err;
+}
+
+static int threadfunc(void *data)
+{
+ int i, step, err = 0, insert_fails = 0;
+ struct thread_data *tdata = data;
+
+ up(&prestart_sem);
+ if (down_interruptible(&startup_sem))
+ pr_err(" thread[%d]: down_interruptible failed\n", tdata->id);
+
+ for (i = 0; i < entries; i++) {
+ tdata->objs[i].value = (tdata->id << 16) | i;
+ err = rhashtable_insert_fast(&ht, &tdata->objs[i].node,
+ test_rht_params);
+ if (err == -ENOMEM || err == -EBUSY) {
+ tdata->objs[i].value = TEST_INSERT_FAIL;
+ insert_fails++;
+ } else if (err) {
+ pr_err(" thread[%d]: rhashtable_insert_fast failed\n",
+ tdata->id);
+ goto out;
+ }
+ }
+ if (insert_fails)
+ pr_info(" thread[%d]: %d insert failures\n",
+ tdata->id, insert_fails);
+
+ err = thread_lookup_test(tdata);
+ if (err) {
+ pr_err(" thread[%d]: rhashtable_lookup_test failed\n",
+ tdata->id);
+ goto out;
+ }
+
+ for (step = 10; step > 0; step--) {
+ for (i = 0; i < entries; i += step) {
+ if (tdata->objs[i].value == TEST_INSERT_FAIL)
+ continue;
+ err = rhashtable_remove_fast(&ht, &tdata->objs[i].node,
+ test_rht_params);
+ if (err) {
+ pr_err(" thread[%d]: rhashtable_remove_fast failed\n",
+ tdata->id);
+ goto out;
+ }
+ tdata->objs[i].value = TEST_INSERT_FAIL;
+ }
+ err = thread_lookup_test(tdata);
+ if (err) {
+ pr_err(" thread[%d]: rhashtable_lookup_test (2) failed\n",
+ tdata->id);
+ goto out;
+ }
+ }
+out:
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+ return err;
+}
+
static int __init test_rht_init(void)
{
- int i, err;
+ int i, err, started_threads = 0, failed_threads = 0;
u64 total_time = 0;
+ struct thread_data *tdata;
+ struct test_obj *objs;
entries = min(entries, MAX_ENTRIES);
@@ -232,6 +342,57 @@ static int __init test_rht_init(void)
do_div(total_time, runs);
pr_info("Average test time: %llu\n", total_time);
+ if (!tcount)
+ return 0;
+
+ pr_info("Testing concurrent rhashtable access from %d threads\n",
+ tcount);
+ sema_init(&prestart_sem, 1 - tcount);
+ tdata = vzalloc(tcount * sizeof(struct thread_data));
+ if (!tdata)
+ return -ENOMEM;
+ objs = vzalloc(tcount * entries * sizeof(struct test_obj));
+ if (!objs) {
+ vfree(tdata);
+ return -ENOMEM;
+ }
+
+ err = rhashtable_init(&ht, &test_rht_params);
+ if (err < 0) {
+ pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+ err);
+ vfree(tdata);
+ vfree(objs);
+ return -EINVAL;
+ }
+ for (i = 0; i < tcount; i++) {
+ tdata[i].id = i;
+ tdata[i].objs = objs + i * entries;
+ tdata[i].task = kthread_run(threadfunc, &tdata[i],
+					    "rhashtable_thread[%d]", i);
+ if (IS_ERR(tdata[i].task))
+ pr_err(" kthread_run failed for thread %d\n", i);
+ else
+ started_threads++;
+ }
+ if (down_interruptible(&prestart_sem))
+ pr_err(" down interruptible failed\n");
+ for (i = 0; i < tcount; i++)
+ up(&startup_sem);
+ for (i = 0; i < tcount; i++) {
+ if (IS_ERR(tdata[i].task))
+ continue;
+ if ((err = kthread_stop(tdata[i].task))) {
+ pr_warn("Test failed: thread %d returned: %d\n",
+ i, err);
+ failed_threads++;
+ }
+ }
+ pr_info("Started %d threads, %d failed\n",
+ started_threads, failed_threads);
+ rhashtable_destroy(&ht);
+ vfree(tdata);
+ vfree(objs);
return 0;
}
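
The thread start-up above uses a small semaphore rendezvous; a condensed
sketch of the handshake, using the same calls as the code above, only to
spell out why prestart_sem is initialised to 1 - tcount:

	/* boot path: a down() on prestart_sem can only complete after
	 * tcount up()s, i.e. once every worker has checked in. */
	sema_init(&prestart_sem, 1 - tcount);

	/* each worker, before touching the table: */
	up(&prestart_sem);			/* "I am ready" */
	down_interruptible(&startup_sem);	/* block until the go signal */

	/* boot path, after spawning the workers: */
	down_interruptible(&prestart_sem);	/* all workers are ready */
	for (i = 0; i < tcount; i++)
		up(&startup_sem);		/* release them together */
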
diff --git a/lib/test_static_key_base.c b/lib/test_static_key_base.c
new file mode 100644
index 000000000000..729447aea02f
--- /dev/null
+++ b/lib/test_static_key_base.c
@@ -0,0 +1,68 @@
+/*
+ * Kernel module for testing static keys.
+ *
+ * Copyright 2015 Akamai Technologies Inc. All Rights Reserved
+ *
+ * Authors:
+ * Jason Baron <jbaron@akamai.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/jump_label.h>
+
+/* old keys */
+struct static_key base_old_true_key = STATIC_KEY_INIT_TRUE;
+EXPORT_SYMBOL_GPL(base_old_true_key);
+struct static_key base_inv_old_true_key = STATIC_KEY_INIT_TRUE;
+EXPORT_SYMBOL_GPL(base_inv_old_true_key);
+struct static_key base_old_false_key = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(base_old_false_key);
+struct static_key base_inv_old_false_key = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(base_inv_old_false_key);
+
+/* new keys */
+DEFINE_STATIC_KEY_TRUE(base_true_key);
+EXPORT_SYMBOL_GPL(base_true_key);
+DEFINE_STATIC_KEY_TRUE(base_inv_true_key);
+EXPORT_SYMBOL_GPL(base_inv_true_key);
+DEFINE_STATIC_KEY_FALSE(base_false_key);
+EXPORT_SYMBOL_GPL(base_false_key);
+DEFINE_STATIC_KEY_FALSE(base_inv_false_key);
+EXPORT_SYMBOL_GPL(base_inv_false_key);
+
+static void invert_key(struct static_key *key)
+{
+ if (static_key_enabled(key))
+ static_key_disable(key);
+ else
+ static_key_enable(key);
+}
+
+static int __init test_static_key_base_init(void)
+{
+ invert_key(&base_inv_old_true_key);
+ invert_key(&base_inv_old_false_key);
+ invert_key(&base_inv_true_key.key);
+ invert_key(&base_inv_false_key.key);
+
+ return 0;
+}
+
+static void __exit test_static_key_base_exit(void)
+{
+}
+
+module_init(test_static_key_base_init);
+module_exit(test_static_key_base_exit);
+
+MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_static_keys.c b/lib/test_static_keys.c
new file mode 100644
index 000000000000..c61b299e367f
--- /dev/null
+++ b/lib/test_static_keys.c
@@ -0,0 +1,225 @@
+/*
+ * Kernel module for testing static keys.
+ *
+ * Copyright 2015 Akamai Technologies Inc. All Rights Reserved
+ *
+ * Authors:
+ * Jason Baron <jbaron@akamai.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/jump_label.h>
+
+/* old keys */
+struct static_key old_true_key = STATIC_KEY_INIT_TRUE;
+struct static_key old_false_key = STATIC_KEY_INIT_FALSE;
+
+/* new api */
+DEFINE_STATIC_KEY_TRUE(true_key);
+DEFINE_STATIC_KEY_FALSE(false_key);
+
+/* external */
+extern struct static_key base_old_true_key;
+extern struct static_key base_inv_old_true_key;
+extern struct static_key base_old_false_key;
+extern struct static_key base_inv_old_false_key;
+
+/* new api */
+extern struct static_key_true base_true_key;
+extern struct static_key_true base_inv_true_key;
+extern struct static_key_false base_false_key;
+extern struct static_key_false base_inv_false_key;
+
+
+struct test_key {
+ bool init_state;
+ struct static_key *key;
+ bool (*test_key)(void);
+};
+
+#define test_key_func(key, branch) \
+ ({bool func(void) { return branch(key); } func; })
+
+static void invert_key(struct static_key *key)
+{
+ if (static_key_enabled(key))
+ static_key_disable(key);
+ else
+ static_key_enable(key);
+}
+
+static void invert_keys(struct test_key *keys, int size)
+{
+ struct static_key *previous = NULL;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (previous != keys[i].key) {
+ invert_key(keys[i].key);
+ previous = keys[i].key;
+ }
+ }
+}
+
+static int verify_keys(struct test_key *keys, int size, bool invert)
+{
+ int i;
+ bool ret, init;
+
+ for (i = 0; i < size; i++) {
+ ret = static_key_enabled(keys[i].key);
+ init = keys[i].init_state;
+ if (ret != (invert ? !init : init))
+ return -EINVAL;
+ ret = keys[i].test_key();
+ if (static_key_enabled(keys[i].key)) {
+ if (!ret)
+ return -EINVAL;
+ } else {
+ if (ret)
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int __init test_static_key_init(void)
+{
+ int ret;
+ int size;
+
+ struct test_key static_key_tests[] = {
+ /* internal keys - old keys */
+ {
+ .init_state = true,
+ .key = &old_true_key,
+ .test_key = test_key_func(&old_true_key, static_key_true),
+ },
+ {
+ .init_state = false,
+ .key = &old_false_key,
+ .test_key = test_key_func(&old_false_key, static_key_false),
+ },
+ /* internal keys - new keys */
+ {
+ .init_state = true,
+ .key = &true_key.key,
+ .test_key = test_key_func(&true_key, static_branch_likely),
+ },
+ {
+ .init_state = true,
+ .key = &true_key.key,
+ .test_key = test_key_func(&true_key, static_branch_unlikely),
+ },
+ {
+ .init_state = false,
+ .key = &false_key.key,
+ .test_key = test_key_func(&false_key, static_branch_likely),
+ },
+ {
+ .init_state = false,
+ .key = &false_key.key,
+ .test_key = test_key_func(&false_key, static_branch_unlikely),
+ },
+ /* external keys - old keys */
+ {
+ .init_state = true,
+ .key = &base_old_true_key,
+ .test_key = test_key_func(&base_old_true_key, static_key_true),
+ },
+ {
+ .init_state = false,
+ .key = &base_inv_old_true_key,
+ .test_key = test_key_func(&base_inv_old_true_key, static_key_true),
+ },
+ {
+ .init_state = false,
+ .key = &base_old_false_key,
+ .test_key = test_key_func(&base_old_false_key, static_key_false),
+ },
+ {
+ .init_state = true,
+ .key = &base_inv_old_false_key,
+ .test_key = test_key_func(&base_inv_old_false_key, static_key_false),
+ },
+ /* external keys - new keys */
+ {
+ .init_state = true,
+ .key = &base_true_key.key,
+ .test_key = test_key_func(&base_true_key, static_branch_likely),
+ },
+ {
+ .init_state = true,
+ .key = &base_true_key.key,
+ .test_key = test_key_func(&base_true_key, static_branch_unlikely),
+ },
+ {
+ .init_state = false,
+ .key = &base_inv_true_key.key,
+ .test_key = test_key_func(&base_inv_true_key, static_branch_likely),
+ },
+ {
+ .init_state = false,
+ .key = &base_inv_true_key.key,
+ .test_key = test_key_func(&base_inv_true_key, static_branch_unlikely),
+ },
+ {
+ .init_state = false,
+ .key = &base_false_key.key,
+ .test_key = test_key_func(&base_false_key, static_branch_likely),
+ },
+ {
+ .init_state = false,
+ .key = &base_false_key.key,
+ .test_key = test_key_func(&base_false_key, static_branch_unlikely),
+ },
+ {
+ .init_state = true,
+ .key = &base_inv_false_key.key,
+ .test_key = test_key_func(&base_inv_false_key, static_branch_likely),
+ },
+ {
+ .init_state = true,
+ .key = &base_inv_false_key.key,
+ .test_key = test_key_func(&base_inv_false_key, static_branch_unlikely),
+ },
+ };
+
+ size = ARRAY_SIZE(static_key_tests);
+
+ ret = verify_keys(static_key_tests, size, false);
+ if (ret)
+ goto out;
+
+ invert_keys(static_key_tests, size);
+ ret = verify_keys(static_key_tests, size, true);
+ if (ret)
+ goto out;
+
+ invert_keys(static_key_tests, size);
+ ret = verify_keys(static_key_tests, size, false);
+ if (ret)
+ goto out;
+ return 0;
+out:
+ return ret;
+}
+
+static void __exit test_static_key_exit(void)
+{
+}
+
+module_init(test_static_key_init);
+module_exit(test_static_key_exit);
+
+MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index da39c608a28c..95cd63b43b99 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -17,6 +17,7 @@
*/
#include <stdarg.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h> /* for KSYM_SYMBOL_LEN */
#include <linux/types.h>
diff --git a/lib/zlib_deflate/deftree.c b/lib/zlib_deflate/deftree.c
index ddf348299f24..9b1756b12743 100644
--- a/lib/zlib_deflate/deftree.c
+++ b/lib/zlib_deflate/deftree.c
@@ -35,6 +35,7 @@
/* #include "deflate.h" */
#include <linux/zutil.h>
+#include <linux/bitrev.h>
#include "defutil.h"
#ifdef DEBUG_ZLIB
@@ -146,7 +147,6 @@ static void send_all_trees (deflate_state *s, int lcodes, int dcodes,
static void compress_block (deflate_state *s, ct_data *ltree,
ct_data *dtree);
static void set_data_type (deflate_state *s);
-static unsigned bi_reverse (unsigned value, int length);
static void bi_windup (deflate_state *s);
static void bi_flush (deflate_state *s);
static void copy_block (deflate_state *s, char *buf, unsigned len,
@@ -284,7 +284,7 @@ static void tr_static_init(void)
/* The static distance tree is trivial: */
for (n = 0; n < D_CODES; n++) {
static_dtree[n].Len = 5;
- static_dtree[n].Code = bi_reverse((unsigned)n, 5);
+ static_dtree[n].Code = bitrev32((u32)n) >> (32 - 5);
}
static_init_done = 1;
}
@@ -520,7 +520,7 @@ static void gen_codes(
int len = tree[n].Len;
if (len == 0) continue;
/* Now reverse the bits */
- tree[n].Code = bi_reverse(next_code[len]++, len);
+ tree[n].Code = bitrev32((u32)(next_code[len]++)) >> (32 - len);
Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
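
The substitution above relies on a small idiom; a sketch of how bitrev32()
reproduces the removed bi_reverse() (the helper name below is hypothetical,
values are illustrative):

	#include <linux/bitrev.h>

	/* Reverse the low `len` bits of `code`: bitrev32() reverses all 32
	 * bits, which parks the reversed value in the top `len` bits; the
	 * shift moves it back down.  e.g. code = 0b00110, len = 5 -> 0b01100. */
	static inline unsigned int reverse_low_bits(unsigned int code, int len)
	{
		return bitrev32((u32)code) >> (32 - len);
	}
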
diff --git a/lib/zlib_deflate/defutil.h b/lib/zlib_deflate/defutil.h
index b640b6402e99..a8c370897c9f 100644
--- a/lib/zlib_deflate/defutil.h
+++ b/lib/zlib_deflate/defutil.h
@@ -293,22 +293,6 @@ void zlib_tr_stored_type_only (deflate_state *);
}
/* ===========================================================================
- * Reverse the first len bits of a code, using straightforward code (a faster
- * method would use a table)
- * IN assertion: 1 <= len <= 15
- */
-static inline unsigned bi_reverse(unsigned code, /* the value to invert */
- int len) /* its bit length */
-{
- register unsigned res = 0;
- do {
- res |= code & 1;
- code >>= 1, res <<= 1;
- } while (--len > 0);
- return res >> 1;
-}
-
-/* ===========================================================================
* Flush the bit buffer, keeping at most 7 bits in it.
*/
static inline void bi_flush(deflate_state *s)