path: root/arch/powerpc/kernel/suspend.c
author    Vivek Goyal <vgoyal@in.ibm.com>    2007-05-02 21:27:07 +0400
committer Andi Kleen <andi@basil.nowhere.org>    2007-05-02 21:27:07 +0400
commit    49c3df6aaa6a51071fc135273d1a2515d019099f (patch)
tree      cbd2fb611d14c9c859f7f417dfafae36ebebe29b    /arch/powerpc/kernel/suspend.c
parent    cfd243d4af7c7f8f52f5cb99d3932d9074b039ff (diff)
download  linux-49c3df6aaa6a51071fc135273d1a2515d019099f.tar.xz
[PATCH] x86: Move swsusp __pa() dependent code to arch portion
o __pa() should be used only on kernel linearly mapped virtual addresses
  and not on kernel text and data addresses.

o Hibernation code needs to determine the physical address associated
  with a kernel symbol in order to mark a section boundary containing
  pages which don't have to be saved and restored during the
  hibernate/resume operation.

o Move this piece of code into the arch dependent section, so that
  architectures which don't have kernel text/data mapped into the kernel
  linearly mapped region can come up with their own ways of determining
  physical addresses associated with kernel text.

Signed-off-by: Vivek Goyal <vgoyal@in.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch/powerpc/kernel/suspend.c')
-rw-r--r--    arch/powerpc/kernel/suspend.c    24
1 file changed, 24 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c
new file mode 100644
index 000000000000..8cee57107541
--- /dev/null
+++ b/arch/powerpc/kernel/suspend.c
@@ -0,0 +1,24 @@
+/*
+ * Suspend support specific for power.
+ *
+ * Distribute under GPLv2
+ *
+ * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
+ * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
+ */
+
+#include <asm/page.h>
+
+/* References to section boundaries */
+extern const void __nosave_begin, __nosave_end;
+
+/*
+ * pfn_is_nosave - check if given pfn is in the 'nosave' section
+ */
+
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
+ unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
+ return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
+}
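
The motivation above is that an architecture whose kernel text and data are not covered by the linear mapping cannot legitimately apply __pa() to &__nosave_begin. Below is a minimal sketch of how such an architecture could implement the same pfn_is_nosave() hook, assuming an x86-style __pa_symbol() helper for translating kernel symbol addresses; it is illustrative only and not a hunk of the patch shown above.

#include <asm/page.h>

/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;

int pfn_is_nosave(unsigned long pfn)
{
	/*
	 * Sketch, not part of this commit: __pa_symbol() translates a
	 * kernel text/data symbol address, which plain __pa() is not
	 * guaranteed to handle on every architecture.
	 */
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

On powerpc the kernel image lies inside the linearly mapped region, so the version added by this patch can keep using __pa() directly on the section symbols.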