Diffstat (limited to 'arch/s390/include/asm/kasan.h')
-rw-r--r--  arch/s390/include/asm/kasan.h | 12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h
index 2768d5db181f..e5cfc81d5b61 100644
--- a/arch/s390/include/asm/kasan.h
+++ b/arch/s390/include/asm/kasan.h
@@ -14,17 +14,15 @@
#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
extern void kasan_early_init(void);
-extern void kasan_copy_shadow_mapping(void);
-extern void kasan_free_early_identity(void);
/*
* Estimate kasan memory requirements, which it will reserve
* at the very end of available physical memory. To estimate
* that, we take into account that kasan would require
* 1/8 of available physical memory (for shadow memory) +
- * creating page tables for the whole memory + shadow memory
- * region (1 + 1/8). To keep page tables estimates simple take
- * the double of combined ptes size.
+ * creating page tables for the shadow memory region.
+ * To keep page tables estimates simple take the double of
+ * combined ptes size.
*
* physmem parameter has to be already adjusted if not entire physical memory
* would be used (e.g. due to effect of "mem=" option).
@@ -36,15 +34,13 @@ static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem)
/* for shadow memory */
kasan_needs = round_up(physmem / 8, PAGE_SIZE);
/* for paging structures */
- pages = DIV_ROUND_UP(physmem + kasan_needs, PAGE_SIZE);
+ pages = DIV_ROUND_UP(kasan_needs, PAGE_SIZE);
kasan_needs += DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
return kasan_needs;
}
#else
static inline void kasan_early_init(void) { }
-static inline void kasan_copy_shadow_mapping(void) { }
-static inline void kasan_free_early_identity(void) { }
static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem) { return 0; }
#endif
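
As a rough illustration of what the revised estimate computes, here is a minimal standalone sketch of the post-patch kasan_estimate_memory_needs(). The constants PAGE_SIZE, _PAGE_ENTRIES and _PAGE_TABLE_SIZE are assumed s390 values (4 KiB pages, 2 KiB page tables with 256 entries), and round_up()/DIV_ROUND_UP() are reimplemented here rather than taken from kernel headers; the main() driver is purely illustrative.

#include <stdio.h>

/* Assumed s390 values, redefined for this standalone sketch. */
#define PAGE_SIZE        4096UL   /* 4 KiB pages */
#define _PAGE_ENTRIES    256UL    /* PTEs per page table */
#define _PAGE_TABLE_SIZE 2048UL   /* bytes per page table */

#define round_up(x, y)     ((((x) + (y) - 1) / (y)) * (y))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Mirrors the estimate after this patch: shadow memory plus page
 * tables for the shadow region only, with the page table size doubled
 * to keep the estimate simple. */
static unsigned long kasan_estimate_memory_needs(unsigned long physmem)
{
	unsigned long kasan_needs;
	unsigned long pages;

	/* 1/8 of physical memory for shadow memory */
	kasan_needs = round_up(physmem / 8, PAGE_SIZE);
	/* page tables covering the shadow region, doubled */
	pages = DIV_ROUND_UP(kasan_needs, PAGE_SIZE);
	kasan_needs += DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;

	return kasan_needs;
}

int main(void)
{
	unsigned long physmem = 4UL << 30;	/* 4 GiB of physical memory */

	/* 512 MiB of shadow + 2 MiB of page tables = 538968064 bytes */
	printf("estimate for 4 GiB: %lu bytes\n",
	       kasan_estimate_memory_needs(physmem));
	return 0;
}

With the assumed constants, 4 GiB of physical memory yields 512 MiB of shadow plus about 2 MiB of page tables; before this patch the page table term also covered the identity mapping of physical memory, so the estimate was slightly larger.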