| author | David S. Miller <davem@sunset.davemloft.net> | 2007-10-16 03:41:44 +0400 |
|---|---|---|
| committer | David S. Miller <davem@sunset.davemloft.net> | 2007-10-18 03:24:55 +0400 |
| commit | 24f287e412ae90de8d281543c8b1043b6ed6c019 (patch) | |
| tree | eb69803d187d35fd9e90c1428952c0ed5a0970c1 /include/asm-sparc64 | |
| parent | d85714d81cc0408daddb68c10f7fd69eafe7c213 (diff) | |
| download | linux-24f287e412ae90de8d281543c8b1043b6ed6c019.tar.xz | |
[SPARC64]: Implement atomic backoff.
When the cpu count is high and contention hits an atomic object, the
processors can synchronize such that some cpus continually get knocked
out and cannot complete the atomic update.
So implement an exponential backoff for SMP builds.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/asm-sparc64')
-rw-r--r-- | include/asm-sparc64/backoff.h | 28 |
1 file changed, 28 insertions(+), 0 deletions(-)
diff --git a/include/asm-sparc64/backoff.h b/include/asm-sparc64/backoff.h
new file mode 100644
index 000000000000..0e32f8b62fd2
--- /dev/null
+++ b/include/asm-sparc64/backoff.h
@@ -0,0 +1,28 @@
+#ifndef _SPARC64_BACKOFF_H
+#define _SPARC64_BACKOFF_H
+
+#define BACKOFF_LIMIT	(4 * 1024)
+
+#ifdef CONFIG_SMP
+
+#define BACKOFF_SETUP(reg)	\
+	mov	1, reg
+
+#define BACKOFF_SPIN(reg, tmp, label)	\
+	mov	reg, tmp; \
+88:	brnz,pt	tmp, 88b; \
+	 sub	tmp, 1, tmp; \
+	cmp	reg, BACKOFF_LIMIT; \
+	bg,pn	%xcc, label; \
+	 nop; \
+	ba,pt	%xcc, label; \
+	 sllx	reg, 1, reg;
+
+#else
+
+#define BACKOFF_SETUP(reg)
+#define BACKOFF_SPIN(reg, tmp, label)
+
+#endif
+
+#endif /* _SPARC64_BACKOFF_H */
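For context, a minimal sketch of how a cas-based atomic routine would be expected to use these macros. The actual callers live outside include/asm-sparc64 and are filtered out of this diffstat view, so the register assignments, label numbers, and comments below are illustrative assumptions, not the committed caller code.

	/* Hypothetical atomic_add-style routine: %o0 = increment,
	 * %o1 = pointer to the atomic word, %o2 = current backoff
	 * count, %o3 = scratch register for BACKOFF_SPIN.
	 */
	BACKOFF_SETUP(%o2)		/* start with a backoff count of 1 */
1:	lduw	[%o1], %g1		/* load the current value */
	add	%g1, %o0, %g7		/* compute the new value */
	cas	[%o1], %g1, %g7		/* attempt the compare-and-swap */
	cmp	%g1, %g7		/* %g7 == %g1 only on success */
	bne,pn	%icc, 2f		/* lost the race: go back off */
	 nop
	retl
	 nop
2:	BACKOFF_SPIN(%o2, %o3, 1b)	/* delay, grow %o2, retry at 1b */

On a failed cas, BACKOFF_SPIN burns a number of loop iterations equal to the current backoff count in %o2, doubles that count until it exceeds BACKOFF_LIMIT, and branches back to the retry label; the growing per-CPU delay is what keeps heavily contending processors from retrying in lockstep.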