Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    |  11
-rw-r--r--  lib/Makefile         |   4
-rw-r--r--  lib/bitmap.c         |  28
-rw-r--r--  lib/bug.c            |  28
-rw-r--r--  lib/cmdline.c        |  57
-rw-r--r--  lib/iov_iter.c       |   8
-rw-r--r--  lib/kobject.c        |   5
-rw-r--r--  lib/md5.c            |  95
-rw-r--r--  lib/refcount.c       | 169
-rw-r--r--  lib/sbitmap.c        |  75
-rw-r--r--  lib/string.c         |   2
-rw-r--r--  lib/test_user_copy.c |   1
-rw-r--r--  lib/usercopy.c       |  26
-rw-r--r--  lib/vsprintf.c       |   6
14 files changed, 333 insertions, 182 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index fa16c0f82d6e..e2a617e09ab7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -130,7 +130,8 @@ config DYNAMIC_DEBUG
 	  nullarbor:~ # echo -n 'func svc_process -p' >
 				<debugfs>/dynamic_debug/control
 
-	  See Documentation/dynamic-debug-howto.txt for additional information.
+	  See Documentation/admin-guide/dynamic-debug-howto.rst for additional
+	  information.
 
 endmenu # "printk and dmesg options"
 
@@ -356,7 +357,7 @@ config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
 		(CRIS || M68K || FRV || UML || \
-		 AVR32 || SUPERH || BLACKFIN || MN10300 || METAG) || \
+		 SUPERH || BLACKFIN || MN10300 || METAG) || \
 		ARCH_WANT_FRAME_POINTERS
 	default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
 	help
@@ -404,8 +405,8 @@ config MAGIC_SYSRQ
 	  by pressing various keys while holding SysRq (Alt+PrintScreen).  It
 	  also works on a serial console (on PC hardware at least), if you
 	  send a BREAK and then within 5 seconds a command keypress.  The
-	  keys are documented in <file:Documentation/sysrq.txt>.  Don't say Y
-	  unless you really know what this hack does.
+	  keys are documented in <file:Documentation/admin-guide/sysrq.rst>.
+	  Don't say Y unless you really know what this hack does.
 
 config MAGIC_SYSRQ_DEFAULT_ENABLE
 	hex "Enable magic SysRq key functions by default"
@@ -414,7 +415,7 @@ config MAGIC_SYSRQ_DEFAULT_ENABLE
 	help
 	  Specifies which SysRq key functions are enabled by default.
 	  This may be set to 1 or 0 to enable or disable them all, or
-	  to a bitmask as described in Documentation/sysrq.txt.
+	  to a bitmask as described in Documentation/admin-guide/sysrq.rst.
 
 config MAGIC_SYSRQ_SERIAL
 	bool "Enable magic SysRq key over serial"
diff --git a/lib/Makefile b/lib/Makefile
index 320ac46a8725..a155c73e3437 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -19,7 +19,7 @@ KCOV_INSTRUMENT_dynamic_debug.o := n
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
 	 idr.o int_sqrt.o extable.o \
-	 sha1.o chacha20.o md5.o irq_regs.o argv_split.o \
+	 sha1.o chacha20.o irq_regs.o argv_split.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
 	 earlycpio.o seq_buf.o siphash.o \
@@ -41,7 +41,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-	 once.o refcount.o
+	 once.o refcount.o usercopy.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 0b66f0e5eb6b..08c6ef3a2b6f 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -502,11 +502,11 @@ EXPORT_SYMBOL(bitmap_print_to_pagebuf);
  * Syntax: range:used_size/group_size
  * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
  *
- * Returns 0 on success, -errno on invalid input strings.
- * Error values:
- *    %-EINVAL: second number in range smaller than first
- *    %-EINVAL: invalid character in string
- *    %-ERANGE: bit number specified too large for mask
+ * Returns: 0 on success, -errno on invalid input strings. Error values:
+ *
+ *   - ``-EINVAL``: second number in range smaller than first
+ *   - ``-EINVAL``: invalid character in string
+ *   - ``-ERANGE``: bit number specified too large for mask
  */
 static int __bitmap_parselist(const char *buf, unsigned int buflen,
 		int is_user, unsigned long *maskp,
@@ -864,14 +864,16 @@ EXPORT_SYMBOL(bitmap_bitremap);
  *	11 was set in @orig had no affect on @dst.
  *
  * Example [2] for bitmap_fold() + bitmap_onto():
- *  Let's say @relmap has these ten bits set:
+ *  Let's say @relmap has these ten bits set::
+ *
  *		40 41 42 43 45 48 53 61 74 95
+ *
  *  (for the curious, that's 40 plus the first ten terms of the
  *  Fibonacci sequence.)
  *
  *  Further lets say we use the following code, invoking
  *  bitmap_fold() then bitmap_onto, as suggested above to
- *  avoid the possibility of an empty @dst result:
+ *  avoid the possibility of an empty @dst result::
  *
  *	unsigned long *tmp;	// a temporary bitmap's bits
  *
@@ -882,22 +884,26 @@ EXPORT_SYMBOL(bitmap_bitremap);
  *  various @orig's.  I list the zero-based positions of each set bit.
  *  The tmp column shows the intermediate result, as computed by
  *  using bitmap_fold() to fold the @orig bitmap modulo ten
- *  (the weight of @relmap).
+ *  (the weight of @relmap):
  *
+ *  =============== ============== =================
  *      @orig           tmp            @dst
  *      0                0              40
  *      1                1              41
  *      9                9              95
- *      10               0              40 (*)
+ *      10               0              40 [#f1]_
  *      1 3 5 7          1 3 5 7        41 43 48 61
  *      0 1 2 3 4        0 1 2 3 4      40 41 42 43 45
  *      0 9 18 27        0 9 8 7        40 61 74 95
  *      0 10 20 30       0              40
  *      0 11 22 33       0 1 2 3        40 41 42 43
  *      0 12 24 36       0 2 4 6        40 42 45 53
- *      78 102 211       1 2 8          41 42 74 (*)
+ *      78 102 211       1 2 8          41 42 74 [#f1]_
+ *  =============== ============== =================
+ *
+ * .. [#f1]
  *
- * (*) For these marked lines, if we hadn't first done bitmap_fold()
+ *     For these marked lines, if we hadn't first done bitmap_fold()
  *     into tmp, then the @dst result would have been empty.
  *
  * If either of @orig or @relmap is empty (no set bits), then @dst
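The parselist syntax documented in the first hunk above ("range:used_size/group_size") is consumed through bitmap_parselist(). A minimal usage sketch in C; the helper name is illustrative, bitmap_parselist() itself is the real entry point:

    #include <linux/bitmap.h>

    /* Hypothetical demo: parse "0-1023:2/256" into a 1024-bit mask.
     * Per the example above this sets bits 0,1,256,257,512,513,768,769. */
    static int demo_parse_groups(unsigned long *mask)
    {
            return bitmap_parselist("0-1023:2/256", mask, 1024);
    }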
diff --git a/lib/bug.c b/lib/bug.c
index 06edbbef0623..a6a1137d06db 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -47,7 +47,7 @@
 #include <linux/sched.h>
 #include <linux/rculist.h>
 
-extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
+extern struct bug_entry __start___bug_table[], __stop___bug_table[];
 
 static inline unsigned long bug_addr(const struct bug_entry *bug)
 {
@@ -62,10 +62,10 @@ static inline unsigned long bug_addr(const struct bug_entry *bug)
 /* Updates are protected by module mutex */
 static LIST_HEAD(module_bug_list);
 
-static const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static struct bug_entry *module_find_bug(unsigned long bugaddr)
 {
 	struct module *mod;
-	const struct bug_entry *bug = NULL;
+	struct bug_entry *bug = NULL;
 
 	rcu_read_lock_sched();
 	list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
@@ -122,15 +122,15 @@ void module_bug_cleanup(struct module *mod)
 
 #else
 
-static inline const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
 {
 	return NULL;
 }
 #endif
 
-const struct bug_entry *find_bug(unsigned long bugaddr)
+struct bug_entry *find_bug(unsigned long bugaddr)
 {
-	const struct bug_entry *bug;
+	struct bug_entry *bug;
 
 	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
 		if (bugaddr == bug_addr(bug))
@@ -141,9 +141,9 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
 
 enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 {
-	const struct bug_entry *bug;
+	struct bug_entry *bug;
 	const char *file;
-	unsigned line, warning;
+	unsigned line, warning, once, done;
 
 	if (!is_valid_bugaddr(bugaddr))
 		return BUG_TRAP_TYPE_NONE;
@@ -164,6 +164,18 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 		line = bug->line;
 #endif
 		warning = (bug->flags & BUGFLAG_WARNING) != 0;
+		once = (bug->flags & BUGFLAG_ONCE) != 0;
+		done = (bug->flags & BUGFLAG_DONE) != 0;
+
+		if (warning && once) {
+			if (done)
+				return BUG_TRAP_TYPE_WARN;
+
+			/*
+			 * Since this is the only store, concurrency is not an issue.
+			 */
+			bug->flags |= BUGFLAG_DONE;
+		}
 	}
 
 	if (warning) {
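The new BUGFLAG_ONCE/BUGFLAG_DONE handling above lets an architecture implement WARN_ONCE()-style semantics directly in its bug trap: the first hit reports and marks the entry done, later hits return BUG_TRAP_TYPE_WARN silently. A hedged sketch of the same once/done idiom outside the bug table; FLAG_* and demo_warn_once() are hypothetical, not kernel API:

    #include <linux/printk.h>

    enum { FLAG_ONCE = 0x1, FLAG_DONE = 0x2 };

    static void demo_warn_once(unsigned int *flags, const char *msg)
    {
            if ((*flags & FLAG_ONCE) && (*flags & FLAG_DONE))
                    return;         /* already reported once; stay silent */

            *flags |= FLAG_DONE;    /* the only store, so no locking needed */
            pr_warn("%s\n", msg);
    }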
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 8f13cf73c2ec..3c6432df7e63 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -15,6 +15,7 @@
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <linux/ctype.h>
 
 /*
  * If a hyphen was found in get_option, this will handle the
@@ -189,3 +190,59 @@ bool parse_option_str(const char *str, const char *option)
 
 	return false;
 }
+
+/*
+ * Parse a string to get a param value pair.
+ * You can use " around spaces, but can't escape ".
+ * Hyphens and underscores equivalent in parameter names.
+ */
+char *next_arg(char *args, char **param, char **val)
+{
+	unsigned int i, equals = 0;
+	int in_quote = 0, quoted = 0;
+	char *next;
+
+	if (*args == '"') {
+		args++;
+		in_quote = 1;
+		quoted = 1;
+	}
+
+	for (i = 0; args[i]; i++) {
+		if (isspace(args[i]) && !in_quote)
+			break;
+		if (equals == 0) {
+			if (args[i] == '=')
+				equals = i;
+		}
+		if (args[i] == '"')
+			in_quote = !in_quote;
+	}
+
+	*param = args;
+	if (!equals)
+		*val = NULL;
+	else {
+		args[equals] = '\0';
+		*val = args + equals + 1;
+
+		/* Don't include quotes in value. */
+		if (**val == '"') {
+			(*val)++;
+			if (args[i-1] == '"')
+				args[i-1] = '\0';
+		}
+	}
+	if (quoted && args[i-1] == '"')
+		args[i-1] = '\0';
+
+	if (args[i]) {
+		args[i] = '\0';
+		next = args + i + 1;
+	} else
+		next = args + i;
+
+	/* Chew up trailing spaces. */
+	return skip_spaces(next);
+}
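next_arg() consumes one parameter=value token per call and returns the rest of the string, so callers loop until it is exhausted; the buffer must be writable, since the parser writes NUL terminators in place. A short usage sketch; demo_parse() is illustrative:

    #include <linux/kernel.h>
    #include <linux/printk.h>

    static void demo_parse(char *cmdline)
    {
            char *param, *val;

            while (*cmdline) {
                    cmdline = next_arg(cmdline, &param, &val);
                    /* 'foo=bar baz="a b"' yields (foo,bar) then (baz,a b). */
                    pr_info("param=%s val=%s\n", param, val ? val : "(none)");
            }
    }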
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 60abc44385b7..4952311422c1 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -413,7 +413,7 @@ void iov_iter_init(struct iov_iter *i, int direction,
 			size_t count)
 {
 	/* It will get better.  Eventually... */
-	if (segment_eq(get_fs(), KERNEL_DS)) {
+	if (uaccess_kernel()) {
 		direction |= ITER_KVEC;
 		i->type = direction;
 		i->kvec = (struct kvec *)iov;
@@ -604,7 +604,7 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 		return 0;
 	}
 	iterate_and_advance(i, bytes, v,
-		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 					 v.iov_base, v.iov_len),
 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 				 v.bv_offset, v.bv_len),
@@ -625,7 +625,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 	if (unlikely(i->count < bytes))
 		return false;
 	iterate_all_kinds(i, bytes, v, ({
-		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 					     v.iov_base, v.iov_len))
 			return false;
 		0;}),
@@ -798,7 +798,7 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
 		while (1) {
 			size_t n = off - pipe->bufs[idx].offset;
 			if (unroll < n) {
-				off -= (n - unroll);
+				off -= unroll;
 				break;
 			}
 			unroll -= n;
diff --git a/lib/kobject.c b/lib/kobject.c
index 445dcaeb0f56..763d70a18941 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -601,12 +601,15 @@ struct kobject *kobject_get(struct kobject *kobj)
 }
 EXPORT_SYMBOL(kobject_get);
 
-static struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
+struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
 {
+	if (!kobj)
+		return NULL;
 	if (!kref_get_unless_zero(&kobj->kref))
 		kobj = NULL;
 	return kobj;
 }
+EXPORT_SYMBOL(kobject_get_unless_zero);
 
 /*
  * kobject_cleanup - free kobject resources.
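Exporting kobject_get_unless_zero() supports the common lookup pattern where a reference may only be taken while the object is still live, typically under a lock that keeps the final free from racing with the lookup. A hedged sketch; the lock and helper name are illustrative:

    #include <linux/kobject.h>
    #include <linux/spinlock.h>

    static struct kobject *demo_lookup(spinlock_t *lock, struct kobject *entry)
    {
            struct kobject *kobj;

            spin_lock(lock);
            /* Succeeds only while the refcount has not yet hit zero. */
            kobj = kobject_get_unless_zero(entry);
            spin_unlock(lock);

            return kobj;    /* NULL if the object was already being torn down */
    }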
diff --git a/lib/md5.c b/lib/md5.c
deleted file mode 100644
index bb0cd01d356d..000000000000
--- a/lib/md5.c
+++ /dev/null
@@ -1,95 +0,0 @@
-#include <linux/compiler.h>
-#include <linux/export.h>
-#include <linux/cryptohash.h>
-
-#define F1(x, y, z) (z ^ (x & (y ^ z)))
-#define F2(x, y, z) F1(z, x, y)
-#define F3(x, y, z) (x ^ y ^ z)
-#define F4(x, y, z) (y ^ (x | ~z))
-
-#define MD5STEP(f, w, x, y, z, in, s) \
-	(w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
-
-void md5_transform(__u32 *hash, __u32 const *in)
-{
-	u32 a, b, c, d;
-
-	a = hash[0];
-	b = hash[1];
-	c = hash[2];
-	d = hash[3];
-
-	MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
-	MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
-	MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
-	MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
-	MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
-	MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
-	MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
-	MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
-	MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
-	MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
-	MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
-	MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
-	MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
-	MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
-	MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
-	MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
-
-	MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
-	MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
-	MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
-	MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
-	MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
-	MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
-	MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
-	MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
-	MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
-	MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
-	MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
-	MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
-	MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
-	MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
-	MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
-	MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
-
-	MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
-	MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
-	MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
-	MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
-	MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
-	MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
-	MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
-	MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
-	MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
-	MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
-	MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
-	MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
-	MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
-	MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
-	MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
-	MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
-
-	MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
-	MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
-	MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
-	MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
-	MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
-	MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
-	MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
-	MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
-	MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
-	MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
-	MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
-	MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
-	MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
-	MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
-	MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
-	MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
-
-	hash[0] += a;
-	hash[1] += b;
-	hash[2] += c;
-	hash[3] += d;
-}
-EXPORT_SYMBOL(md5_transform);
diff --git a/lib/refcount.c b/lib/refcount.c
index aa09ad3c30b0..f42124ccf295 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -37,11 +37,29 @@
 #include <linux/refcount.h>
 #include <linux/bug.h>
 
+/**
+ * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
 bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		if (!val)
 			return false;
 
@@ -51,12 +69,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 		new = val + i;
 		if (new < val)
 			new = UINT_MAX;
-		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-		if (old == val)
-			break;
 
-		val = old;
-	}
+	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
 
 	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -64,24 +78,45 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_add_not_zero);
 
+/**
+ * refcount_add - add a value to a refcount
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ */
 void refcount_add(unsigned int i, refcount_t *r)
 {
 	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_add);
 
-/*
- * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc_not_zero - increment a refcount unless it is 0
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
  *
  * Provides no memory ordering, it is assumed the caller has guaranteed the
  * object memory to be stable (RCU, etc.). It does provide a control dependency
  * and thereby orders future stores. See the comment on top.
+ *
+ * Return: true if the increment was successful, false otherwise
  */
 bool refcount_inc_not_zero(refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		new = val + 1;
 
 		if (!val)
@@ -90,12 +125,7 @@ bool refcount_inc_not_zero(refcount_t *r)
 		if (unlikely(!new))
 			return true;
 
-		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
 
 	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -103,11 +133,17 @@ bool refcount_inc_not_zero(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
 
-/*
- * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc - increment a refcount
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
  *
  * Provides no memory ordering, it is assumed the caller already has a
- * reference on the object, will WARN when this is not so.
+ * reference on the object.
+ *
+ * Will WARN if the refcount is 0, as this represents a possible use-after-free
+ * condition.
  */
 void refcount_inc(refcount_t *r)
 {
@@ -115,11 +151,31 @@ void refcount_inc(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_inc);
 
+/**
+ * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * @i: amount to subtract from the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), but it will WARN, return false and
+ * ultimately leak on underflow and will fail to decrement when saturated
+ * at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_dec(), or one of its variants, should instead be used to
+ * decrement a reference count.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		if (unlikely(val == UINT_MAX))
 			return false;
 
@@ -129,24 +185,24 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 			return false;
 		}
 
-		old = atomic_cmpxchg_release(&r->refs, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
 
 	return !new;
 }
 EXPORT_SYMBOL_GPL(refcount_sub_and_test);
 
-/*
+/**
+ * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * @r: the refcount
+ *
  * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
  * decrement when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
  * See the comment on top.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
  */
 bool refcount_dec_and_test(refcount_t *r)
 {
@@ -154,21 +210,26 @@ bool refcount_dec_and_test(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_dec_and_test);
 
-/*
+/**
+ * refcount_dec - decrement a refcount
+ * @r: the refcount
+ *
  * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
  * when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before.
  */
-
 void refcount_dec(refcount_t *r)
 {
 	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_dec);
 
-/*
+/**
+ * refcount_dec_if_one - decrement a refcount if it is 1
+ * @r: the refcount
+ *
  * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
  * success thereof.
  *
@@ -178,24 +239,33 @@ EXPORT_SYMBOL_GPL(refcount_dec);
  * It can be used like a try-delete operator; this explicit case is provided
  * and not cmpxchg in generic, because that would allow implementing unsafe
  * operations.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
  */
 bool refcount_dec_if_one(refcount_t *r)
 {
-	return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+	int val = 1;
+
+	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
 }
 EXPORT_SYMBOL_GPL(refcount_dec_if_one);
 
-/*
+/**
+ * refcount_dec_not_one - decrement a refcount if it is not 1
+ * @r: the refcount
+ *
  * No atomic_t counterpart, it decrements unless the value is 1, in which case
  * it will return false.
  *
  * Was often done like: atomic_add_unless(&var, -1, 1)
+ *
+ * Return: true if the decrement operation was successful, false otherwise
  */
 bool refcount_dec_not_one(refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		if (unlikely(val == UINT_MAX))
 			return true;
 
@@ -208,24 +278,27 @@ bool refcount_dec_not_one(refcount_t *r)
 			return true;
 		}
 
-		old = atomic_cmpxchg_release(&r->refs, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
 
 	return true;
 }
 EXPORT_SYMBOL_GPL(refcount_dec_not_one);
 
-/*
+/**
+ * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
+ *                               refcount to 0
+ * @r: the refcount
+ * @lock: the mutex to be locked
+ *
  * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
  * to decrement when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
  * See the comment on top.
+ *
+ * Return: true and hold mutex if able to decrement refcount to 0, false
+ *         otherwise
  */
 bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
 {
@@ -242,13 +315,21 @@ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
 }
 EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);
 
-/*
+/**
+ * refcount_dec_and_lock - return holding spinlock if able to decrement
+ *                         refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ *
  * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
  * decrement when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
  * See the comment on top.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ *         otherwise
  */
 bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 {
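All of the refcount.c conversions above follow one shape: atomic_try_cmpxchg_*() stores the freshly observed value back into 'val' when it fails, so the old reload-compare-retry bookkeeping collapses into a do/while. An abstract sketch of the idiom; compute_new() is a hypothetical stand-in for the per-function logic:

    #include <linux/atomic.h>

    static bool demo_update(atomic_t *v)
    {
            int val = atomic_read(v);
            int new;

            do {
                    if (!val)
                            return false;   /* e.g. refuse to resurrect zero */
                    new = compute_new(val); /* hypothetical caller-specific step */
                    /* on failure, try_cmpxchg reloads 'val' for the next pass */
            } while (!atomic_try_cmpxchg_relaxed(v, &val, new));

            return true;
    }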
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 60e800e0b5a0..80aa8d5463fa 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -79,15 +79,15 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
 }
 EXPORT_SYMBOL_GPL(sbitmap_resize);
 
-static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
-			      bool wrap)
+static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
+			      unsigned int hint, bool wrap)
 {
 	unsigned int orig_hint = hint;
 	int nr;
 
 	while (1) {
-		nr = find_next_zero_bit(&word->word, word->depth, hint);
-		if (unlikely(nr >= word->depth)) {
+		nr = find_next_zero_bit(word, depth, hint);
+		if (unlikely(nr >= depth)) {
 			/*
 			 * We started with an offset, and we didn't reset the
 			 * offset to 0 in a failure case, so start from 0 to
@@ -100,11 +100,11 @@ static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
 			return -1;
 		}
 
-		if (!test_and_set_bit(nr, &word->word))
+		if (!test_and_set_bit(nr, word))
 			break;
 
 		hint = nr + 1;
-		if (hint >= word->depth - 1)
+		if (hint >= depth - 1)
 			hint = 0;
 	}
 
@@ -119,7 +119,8 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
 	index = SB_NR_TO_INDEX(sb, alloc_hint);
 
 	for (i = 0; i < sb->map_nr; i++) {
-		nr = __sbitmap_get_word(&sb->map[index],
+		nr = __sbitmap_get_word(&sb->map[index].word,
+					sb->map[index].depth,
 					SB_NR_TO_BIT(sb, alloc_hint),
 					!round_robin);
 		if (nr != -1) {
@@ -141,6 +142,37 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
 }
 EXPORT_SYMBOL_GPL(sbitmap_get);
 
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
+			unsigned long shallow_depth)
+{
+	unsigned int i, index;
+	int nr = -1;
+
+	index = SB_NR_TO_INDEX(sb, alloc_hint);
+
+	for (i = 0; i < sb->map_nr; i++) {
+		nr = __sbitmap_get_word(&sb->map[index].word,
+					min(sb->map[index].depth, shallow_depth),
+					SB_NR_TO_BIT(sb, alloc_hint), true);
+		if (nr != -1) {
+			nr += index << sb->shift;
+			break;
+		}
+
+		/* Jump to next index. */
+		index++;
+		alloc_hint = index << sb->shift;
+
+		if (index >= sb->map_nr) {
+			index = 0;
+			alloc_hint = 0;
+		}
+	}
+
+	return nr;
+}
+EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
+
 bool sbitmap_any_bit_set(const struct sbitmap *sb)
 {
 	unsigned int i;
@@ -342,6 +374,35 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq)
 }
 EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
 
+int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+				unsigned int shallow_depth)
+{
+	unsigned int hint, depth;
+	int nr;
+
+	hint = this_cpu_read(*sbq->alloc_hint);
+	depth = READ_ONCE(sbq->sb.depth);
+	if (unlikely(hint >= depth)) {
+		hint = depth ? prandom_u32() % depth : 0;
+		this_cpu_write(*sbq->alloc_hint, hint);
+	}
+	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);
+
+	if (nr == -1) {
+		/* If the map is full, a hint won't do us much good. */
+		this_cpu_write(*sbq->alloc_hint, 0);
+	} else if (nr == hint || unlikely(sbq->round_robin)) {
+		/* Only update the hint if we used it. */
+		hint = nr + 1;
+		if (hint >= depth - 1)
+			hint = 0;
+		this_cpu_write(*sbq->alloc_hint, hint);
+	}
+
+	return nr;
+}
+EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
+
 static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
 {
 	int i, wake_index;
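sbitmap_get_shallow() caps how deep into each word the free-bit search may go (min(word depth, shallow_depth)), which lets a queue hold back part of every word for more important allocations. A hedged usage sketch built on the functions added above; the urgency split and the depth of 4 are illustrative:

    #include <linux/sbitmap.h>

    static int demo_get_tag(struct sbitmap_queue *sbq, bool urgent)
    {
            if (urgent)
                    return __sbitmap_queue_get(sbq);

            /* Non-urgent callers may only use the first 4 bits of each word. */
            return __sbitmap_queue_get_shallow(sbq, 4);
    }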
diff --git a/lib/string.c b/lib/string.c
index ed83562a53ae..b5c9a1168d3a 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -131,7 +131,7 @@ EXPORT_SYMBOL(strncpy);
  * @src: Where to copy the string from
  * @size: size of destination buffer
  *
- * Compatible with *BSD: the result is always a valid
+ * Compatible with ``*BSD``: the result is always a valid
  * NUL-terminated string that fits in the buffer (unless,
  * of course, the buffer size is zero). It does not pad
  * out the result like strncpy() does.
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 1a8d71a68531..4621db801b23 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -31,7 +31,6 @@
  * their capability at compile-time, we just have to opt-out certain archs.
  */
 #if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
-			    !defined(CONFIG_AVR32) &&		\
 			    !defined(CONFIG_BLACKFIN) &&	\
 			    !defined(CONFIG_M32R) &&		\
 			    !defined(CONFIG_M68K) &&		\
diff --git a/lib/usercopy.c b/lib/usercopy.c
new file mode 100644
index 000000000000..1b6010a3beb8
--- /dev/null
+++ b/lib/usercopy.c
@@ -0,0 +1,26 @@
+#include <linux/uaccess.h>
+
+/* out-of-line parts */
+
+#ifndef INLINE_COPY_FROM_USER
+unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	unsigned long res = n;
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		res = raw_copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
+}
+EXPORT_SYMBOL(_copy_from_user);
+#endif
+
+#ifndef INLINE_COPY_TO_USER
+unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (likely(access_ok(VERIFY_WRITE, to, n)))
+		n = raw_copy_to_user(to, from, n);
+	return n;
+}
+EXPORT_SYMBOL(_copy_to_user);
+#endif
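The out-of-line _copy_from_user() keeps the usual contract: the return value is the number of bytes left uncopied, and any uncopied tail of the kernel buffer is zeroed so short copies never expose stale kernel memory. Conventional caller-side handling; demo_read_config() is illustrative:

    #include <linux/uaccess.h>
    #include <linux/errno.h>

    static long demo_read_config(const void __user *ubuf, unsigned long len)
    {
            char kbuf[64];

            if (len > sizeof(kbuf))
                    return -EINVAL;
            if (copy_from_user(kbuf, ubuf, len))
                    return -EFAULT; /* nonzero means some bytes not copied */

            /* kbuf[0..len-1] is fully valid here; a short copy errored above. */
            return 0;
    }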
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index e3bf4e0f10b5..176641cc549d 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1954,13 +1954,13 @@ set_precision(struct printf_spec *spec, int prec)
  * This function generally follows C99 vsnprintf, but has some
  * extensions and a few limitations:
  *
- *  %n is unsupported
- *  %p* is handled by pointer()
+ *  - ``%n`` is unsupported
+ *  - ``%p*`` is handled by pointer()
  *
  * See pointer() or Documentation/printk-formats.txt for more
  * extensive description.
  *
- * ** Please update the documentation in both places when making changes **
+ * **Please update the documentation in both places when making changes**
  *
  * The return value is the number of characters which would
  * be generated for the given input, excluding the trailing
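Since vsnprintf() returns the length the fully formatted string would have had, truncation is detected by comparing the return value with the buffer size. A small sketch; demo_format() is illustrative:

    #include <linux/kernel.h>
    #include <linux/printk.h>

    static void demo_format(void)
    {
            char buf[16];
            int n = snprintf(buf, sizeof(buf), "value=%d", 123456789);

            if (n >= (int)sizeof(buf))
                    pr_warn("output truncated, needed %d bytes\n", n + 1);
    }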