author | Ingo Molnar <mingo@kernel.org> | 2013-07-19 11:34:42 +0400
---|---|---
committer | Ingo Molnar <mingo@kernel.org> | 2013-07-19 11:34:42 +0400
commit | e43fff2b98b4e99b189c086a9cf740b19eaf3538 (patch) |
tree | 658a5f73d5806fe47856547b17cbd23592856c62 /init |
parent | 67516844625f45f0ce148a01c27bf41f591872b2 (diff) |
parent | ecb2cf1a6b63825a258ff4fe0d7f3070fbe4676b (diff) |
download | linux-e43fff2b98b4e99b189c086a9cf740b19eaf3538.tar.xz |
Merge branch 'linus' into perf/core
Merge in a v3.11-rc1-ish branch to go from v3.10 based development
to a v3.11 based one.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'init')
-rw-r--r-- | init/Kconfig | 33
-rw-r--r-- | init/calibrate.c | 13
-rw-r--r-- | init/do_mounts.c | 2
-rw-r--r-- | init/main.c | 5
4 files changed, 43 insertions, 10 deletions
diff --git a/init/Kconfig b/init/Kconfig
index 118895cc1f67..247084be0590 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -112,10 +112,13 @@ config HAVE_KERNEL_XZ
 config HAVE_KERNEL_LZO
 	bool
 
+config HAVE_KERNEL_LZ4
+	bool
+
 choice
 	prompt "Kernel compression mode"
 	default KERNEL_GZIP
-	depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO
+	depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4
 	help
 	  The linux kernel is a kind of self-extracting executable.
 	  Several compression algorithms are available, which differ
@@ -182,6 +185,18 @@ config KERNEL_LZO
 	  size is about 10% bigger than gzip; however its speed
 	  (both compression and decompression) is the fastest.
 
+config KERNEL_LZ4
+	bool "LZ4"
+	depends on HAVE_KERNEL_LZ4
+	help
+	  LZ4 is an LZ77-type compressor with a fixed, byte-oriented encoding.
+	  A preliminary version of LZ4 de/compression tool is available at
+	  <https://code.google.com/p/lz4/>.
+
+	  Its compression ratio is worse than LZO. The size of the kernel
+	  is about 8% bigger than LZO. But the decompression speed is
+	  faster than LZO.
+
 endchoice
 
 config DEFAULT_HOSTNAME
@@ -780,6 +795,9 @@ config LOG_BUF_SHIFT
 config HAVE_UNSTABLE_SCHED_CLOCK
 	bool
 
+config GENERIC_SCHED_CLOCK
+	bool
+
 #
 # For architectures that want to enable the support for NUMA-affine scheduler
 # balancing logic:
@@ -899,7 +917,7 @@ config MEMCG
 	  Note that setting this option increases fixed memory overhead
 	  associated with each page of memory in the system. By this,
-	  20(40)bytes/PAGE_SIZE on 32(64)bit system will be occupied by memory
+	  8(16)bytes/PAGE_SIZE on 32(64)bit system will be occupied by memory
 	  usage tracking struct at boot. Total amount of this is printed out
 	  at boot.
 
@@ -1578,6 +1596,17 @@ config SLOB
 
 endchoice
 
+config SLUB_CPU_PARTIAL
+	default y
+	depends on SLUB
+	bool "SLUB per cpu partial cache"
+	help
+	  Per cpu partial caches accellerate objects allocation and freeing
+	  that is local to a processor at the price of more indeterminism
+	  in the latency of the free. On overflow these caches will be cleared
+	  which requires the taking of locks that may cause latency spikes.
+	  Typically one would choose no for a realtime system.
+
 config MMAP_ALLOW_UNINITIALIZED
 	bool "Allow mmapped anonymous memory to be uninitialized"
 	depends on EXPERT && !MMU
diff --git a/init/calibrate.c b/init/calibrate.c
index fda0a7b0f06c..520702db9acc 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -31,7 +31,7 @@ __setup("lpj=", lpj_setup);
 #define DELAY_CALIBRATION_TICKS		((HZ < 100) ? 1 : (HZ/100))
 #define MAX_DIRECT_CALIBRATION_RETRIES	5
 
-static unsigned long __cpuinit calibrate_delay_direct(void)
+static unsigned long calibrate_delay_direct(void)
 {
 	unsigned long pre_start, start, post_start;
 	unsigned long pre_end, end, post_end;
@@ -166,7 +166,10 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
 	return 0;
 }
 #else
-static unsigned long __cpuinit calibrate_delay_direct(void) {return 0;}
+static unsigned long calibrate_delay_direct(void)
+{
+	return 0;
+}
 #endif
 
 /*
@@ -180,7 +183,7 @@ static unsigned long __cpuinit calibrate_delay_direct(void) {return 0;}
  */
 #define LPS_PREC 8
 
-static unsigned long __cpuinit calibrate_delay_converge(void)
+static unsigned long calibrate_delay_converge(void)
 {
 	/* First stage - slowly accelerate to find initial bounds */
 	unsigned long lpj, lpj_base, ticks, loopadd, loopadd_base, chop_limit;
@@ -254,12 +257,12 @@ static DEFINE_PER_CPU(unsigned long, cpu_loops_per_jiffy) = { 0 };
  * Architectures should override this function if a faster calibration
  * method is available.
  */
-unsigned long __attribute__((weak)) __cpuinit calibrate_delay_is_known(void)
+unsigned long __attribute__((weak)) calibrate_delay_is_known(void)
 {
 	return 0;
 }
 
-void __cpuinit calibrate_delay(void)
+void calibrate_delay(void)
 {
 	unsigned long lpj;
 	static bool printed;
diff --git a/init/do_mounts.c b/init/do_mounts.c
index a2b49f2c1bd8..816014c4627e 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -536,7 +536,7 @@ void __init prepare_namespace(void)
 	int is_floppy;
 
 	if (root_delay) {
-		printk(KERN_INFO "Waiting %dsec before mounting root device...\n",
+		printk(KERN_INFO "Waiting %d sec before mounting root device...\n",
		       root_delay);
 		ssleep(root_delay);
 	}
diff --git a/init/main.c b/init/main.c
index ec549581d732..d03d2ec2eacf 100644
--- a/init/main.c
+++ b/init/main.c
@@ -74,6 +74,7 @@
 #include <linux/ptrace.h>
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
+#include <linux/sched_clock.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -554,6 +555,7 @@ asmlinkage void __init start_kernel(void)
 	softirq_init();
 	timekeeping_init();
 	time_init();
+	sched_clock_postinit();
 	perf_event_init();
 	profile_init();
 	call_function_init();
@@ -655,8 +657,6 @@ static void __init do_ctors(void)
 bool initcall_debug;
 core_param(initcall_debug, initcall_debug, bool, 0644);
 
-static char msgbuf[64];
-
 static int __init_or_module do_one_initcall_debug(initcall_t fn)
 {
 	ktime_t calltime, delta, rettime;
@@ -679,6 +679,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
 {
 	int count = preempt_count();
 	int ret;
+	char msgbuf[64];
 
 	if (initcall_debug)
 		ret = do_one_initcall_debug(fn);
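A note on the init/main.c hunks above: the 64-byte msgbuf scratch buffer moves from a file-scope static into a local variable of do_one_initcall(), so each call formats its diagnostics in its own stack buffer rather than sharing one module-wide. The sketch below is a minimal user-space illustration of that pattern only, not the kernel's code; run_one_initcall() and dummy_initcall() are hypothetical stand-ins for do_one_initcall() and a real initcall.

```c
/*
 * Minimal user-space sketch of the msgbuf change in init/main.c above:
 * the scratch buffer is a per-call local instead of a shared static,
 * so every invocation formats its own diagnostics independently.
 * run_one_initcall() and dummy_initcall() are hypothetical stand-ins,
 * not kernel APIs.
 */
#include <stdio.h>

typedef int (*initcall_t)(void);

static int run_one_initcall(initcall_t fn)
{
	char msgbuf[64];	/* per-call buffer, as in the patched do_one_initcall() */
	int ret = fn();

	msgbuf[0] = '\0';
	if (ret)
		snprintf(msgbuf, sizeof(msgbuf), "error code %d ", ret);
	if (msgbuf[0])
		fprintf(stderr, "initcall returned with %s\n", msgbuf);
	return ret;
}

static int dummy_initcall(void)
{
	return -1;	/* pretend the initcall failed */
}

int main(void)
{
	return run_one_initcall(dummy_initcall) ? 1 : 0;
}
```

Compiled with any C compiler (e.g. `cc sketch.c`), the program reports the failing initcall's error code and exits non-zero, loosely mirroring the kernel pattern of only printing msgbuf when something was written into it.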