author | Marc Zyngier <Marc.Zyngier@arm.com> | 2011-12-15 15:19:23 +0400
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-12-19 03:00:26 +0400
commit | 2f0778afac79bd8d226225556858a636931eeabc (patch)
tree | e00cea674f3d6cc8c5584aa5b75239f11f6d229d /arch/arm/include
parent | 3bdc3484e8f2b1b219ad0397d81ce4601fbaf76d (diff)
download | linux-2f0778afac79bd8d226225556858a636931eeabc.tar.xz
ARM: 7205/2: sched_clock: allow sched_clock to be selected at runtime
sched_clock() is yet another blocker on the road to the single
image. This patch implements an idea by Russell King:
http://www.spinics.net/lists/linux-omap/msg49561.html
Instead of asking the platform to implement both sched_clock()
itself and the rollover callback, simply register a read()
function, and let the ARM code take care of sched_clock() itself,
the conversion to ns and the rollover. sched_clock() uses
this read() function as an indirection to the platform code.
If the platform doesn't provide a read(), the code falls back
to the jiffy counter (just like the default sched_clock).
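As a rough illustration (not part of this patch), a platform with a
free-running 32-bit counter would now only register its read() hook.
The function name, counter base and rate below are made up for the
example, and the usual <linux/io.h> / <asm/sched_clock.h> includes
are assumed:

	/* hypothetical platform code, illustrative values only */
	static u32 notrace example_read_sched_clock(void)
	{
		return readl_relaxed(example_counter_base);	/* raw counter value */
	}

	static void __init example_timer_init(void)
	{
		/* 32-bit counter running at 24MHz (example rate) */
		setup_sched_clock(example_read_sched_clock, 32, 24000000);
	}

The generic code then handles the ns conversion and the wrap-around
timer that the platform previously had to set up itself.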
This allows some simplifications and possibly some footprint gain
when multiple platforms are compiled in. Among the drawbacks is
the removal of the *_fixed_sched_clock optimization, which could
negatively impact some platforms (sa1100, tegra, versatile
and omap).
Tested on 11MPCore, OMAP4 and Tegra.
Cc: Imre Kaloz <kaloz@openwrt.org>
Cc: Eric Miao <eric.y.miao@gmail.com>
Cc: Colin Cross <ccross@android.com>
Cc: Erik Gilling <konkers@android.com>
Cc: Olof Johansson <olof@lixom.net>
Cc: Sascha Hauer <kernel@pengutronix.de>
Cc: Alessandro Rubini <rubini@unipv.it>
Cc: STEricsson <STEricsson_nomadik_linux@list.st.com>
Cc: Lennert Buytenhek <kernel@wantstofly.org>
Cc: Ben Dooks <ben-linux@fluff.org>
Tested-by: Jamie Iles <jamie@jamieiles.com>
Tested-by: Tony Lindgren <tony@atomide.com>
Tested-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Krzysztof Halasa <khc@pm.waw.pl>
Acked-by: Kukjin Kim <kgene.kim@samsung.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/include')
-rw-r--r-- | arch/arm/include/asm/sched_clock.h | 108 |
1 file changed, 1 insertion(+), 107 deletions(-)
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
index c8e6ddf3e860..e3f757263438 100644
--- a/arch/arm/include/asm/sched_clock.h
+++ b/arch/arm/include/asm/sched_clock.h
@@ -8,113 +8,7 @@
 #ifndef ASM_SCHED_CLOCK
 #define ASM_SCHED_CLOCK
 
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-struct clock_data {
-	u64 epoch_ns;
-	u32 epoch_cyc;
-	u32 epoch_cyc_copy;
-	u32 mult;
-	u32 shift;
-};
-
-#define DEFINE_CLOCK_DATA(name)	struct clock_data name
-
-static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
-{
-	return (cyc * mult) >> shift;
-}
-
-/*
- * Atomically update the sched_clock epoch. Your update callback will
- * be called from a timer before the counter wraps - read the current
- * counter value, and call this function to safely move the epochs
- * forward. Only use this from the update callback.
- */
-static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask)
-{
-	unsigned long flags;
-	u64 ns = cd->epoch_ns +
-		cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift);
-
-	/*
-	 * Write epoch_cyc and epoch_ns in a way that the update is
-	 * detectable in cyc_to_fixed_sched_clock().
-	 */
-	raw_local_irq_save(flags);
-	cd->epoch_cyc = cyc;
-	smp_wmb();
-	cd->epoch_ns = ns;
-	smp_wmb();
-	cd->epoch_cyc_copy = cyc;
-	raw_local_irq_restore(flags);
-}
-
-/*
- * If your clock rate is known at compile time, using this will allow
- * you to optimize the mult/shift loads away. This is paired with
- * init_fixed_sched_clock() to ensure that your mult/shift are correct.
- */
-static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd,
-	u32 cyc, u32 mask, u32 mult, u32 shift)
-{
-	u64 epoch_ns;
-	u32 epoch_cyc;
-
-	/*
-	 * Load the epoch_cyc and epoch_ns atomically. We do this by
-	 * ensuring that we always write epoch_cyc, epoch_ns and
-	 * epoch_cyc_copy in strict order, and read them in strict order.
-	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
-	 * the middle of an update, and we should repeat the load.
-	 */
-	do {
-		epoch_cyc = cd->epoch_cyc;
-		smp_rmb();
-		epoch_ns = cd->epoch_ns;
-		smp_rmb();
-	} while (epoch_cyc != cd->epoch_cyc_copy);
-
-	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);
-}
-
-/*
- * Otherwise, you need to use this, which will obtain the mult/shift
- * from the clock_data structure. Use init_sched_clock() with this.
- */
-static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd,
-	u32 cyc, u32 mask)
-{
-	return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift);
-}
-
-/*
- * Initialize the clock data - calculate the appropriate multiplier
- * and shift. Also setup a timer to ensure that the epoch is refreshed
- * at the appropriate time interval, which will call your update
- * handler.
- */
-void init_sched_clock(struct clock_data *, void (*)(void),
-	unsigned int, unsigned long);
-
-/*
- * Use this initialization function rather than init_sched_clock() if
- * you're using cyc_to_fixed_sched_clock, which will warn if your
- * constants are incorrect.
- */
-static inline void init_fixed_sched_clock(struct clock_data *cd,
-	void (*update)(void), unsigned int bits, unsigned long rate,
-	u32 mult, u32 shift)
-{
-	init_sched_clock(cd, update, bits, rate);
-	if (cd->mult != mult || cd->shift != shift) {
-		pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n"
-			"sched_clock: fix multiply/shift to avoid scheduler hiccups\n",
-			mult, shift, cd->mult, cd->shift);
-	}
-}
-
 extern void sched_clock_postinit(void);
+extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
 
 #endif
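For reference, the ns conversion that now lives in the generic code is
the same fixed-point multiply the removed cyc_to_ns() helper performed:
ns = (cyc * mult) >> shift. A standalone sketch with hand-picked
illustrative numbers (a 24MHz counter; the kernel normally computes the
mult/shift pair itself):

	#include <stdint.h>
	#include <stdio.h>

	/* ns = (cyc * mult) >> shift, as in the removed cyc_to_ns() helper. */
	static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
	{
		return (cyc * mult) >> shift;
	}

	int main(void)
	{
		/*
		 * For a 24MHz counter, one cycle is 1000/24 ~= 41.667ns.
		 * With shift = 24, mult ~= 41.667 * 2^24 ~= 699050667, so
		 * 24000000 cycles (one second) converts to ~1e9 ns.
		 */
		printf("%llu ns\n",
		       (unsigned long long)cyc_to_ns(24000000, 699050667, 24));
		return 0;
	}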