path: root/kernel/power/snapshot.c
author     Pavel Machek <pavel@ucw.cz>            2005-11-07 11:58:40 +0300
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-11-07 18:53:29 +0300
commit     dc19d507b17135069d9c5d6093d4458dc60e1861 (patch)
tree       a5913fbd1185b3af5463a90aca59fe902aba2f73 /kernel/power/snapshot.c
parent     36fabc248e5466e3f28897819b0400b5cdbb8dc6 (diff)
download   linux-dc19d507b17135069d9c5d6093d4458dc60e1861.tar.xz
[PATCH] swsusp cleanups
This cleans up the spacing between '*' and the pointer name, and spells bare "unsigned" as "unsigned int".

Signed-off-by: Pavel Machek <pavel@suse.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
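As a sketch of the style rule being applied (the declaration names below are illustrative, mirroring the hunks that follow):

    /* Before: bare "unsigned" and a space between '*' and the name */
    unsigned nr_pages;
    struct page * page;

    /* After: explicit "unsigned int", with '*' attached to the name */
    unsigned int nr_pages;
    struct page *page;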
Diffstat (limited to 'kernel/power/snapshot.c')
-rw-r--r--  kernel/power/snapshot.c  19
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 42a628704398..723f5179883e 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -168,9 +168,8 @@ static unsigned count_data_pages(void)
{
struct zone *zone;
unsigned long zone_pfn;
- unsigned n;
+ unsigned int n = 0;
- n = 0;
for_each_zone (zone) {
if (is_highmem(zone))
continue;
@@ -250,10 +249,10 @@ static inline void fill_pb_page(struct pbe *pbpage)
* of memory pages allocated with alloc_pagedir()
*/
-void create_pbe_list(struct pbe *pblist, unsigned nr_pages)
+void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
struct pbe *pbpage, *p;
- unsigned num = PBES_PER_PAGE;
+ unsigned int num = PBES_PER_PAGE;
for_each_pb_page (pbpage, pblist) {
if (num >= nr_pages)
@@ -293,9 +292,9 @@ static void *alloc_image_page(void)
* On each page we set up a list of struct_pbe elements.
*/
-struct pbe *alloc_pagedir(unsigned nr_pages)
+struct pbe *alloc_pagedir(unsigned int nr_pages)
{
- unsigned num;
+ unsigned int num;
struct pbe *pblist, *pbe;
if (!nr_pages)
@@ -329,7 +328,7 @@ void swsusp_free(void)
for_each_zone(zone) {
for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
- struct page * page;
+ struct page *page;
page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
if (PageNosave(page) && PageNosaveFree(page)) {
ClearPageNosave(page);
@@ -348,7 +347,7 @@ void swsusp_free(void)
* free pages.
*/
-static int enough_free_mem(unsigned nr_pages)
+static int enough_free_mem(unsigned int nr_pages)
{
pr_debug("swsusp: available memory: %u pages\n", nr_free_pages());
return nr_free_pages() > (nr_pages + PAGES_FOR_IO +
@@ -356,7 +355,7 @@ static int enough_free_mem(unsigned nr_pages)
}
-static struct pbe *swsusp_alloc(unsigned nr_pages)
+static struct pbe *swsusp_alloc(unsigned int nr_pages)
{
struct pbe *pblist, *p;
@@ -380,7 +379,7 @@ static struct pbe *swsusp_alloc(unsigned nr_pages)
asmlinkage int swsusp_save(void)
{
- unsigned nr_pages;
+ unsigned int nr_pages;
pr_debug("swsusp: critical section: \n");
if (save_highmem()) {