author     Tony Luck <tony.luck@intel.com>    2010-01-08 03:10:57 +0300
committer  Tony Luck <tony.luck@intel.com>    2010-01-08 03:10:57 +0300
commit     6c57a332901f851bd092aba7a2b4d8ef4e643829 (patch)
tree       049e913b0c7b4c678ba699ac02f62e16db8cb515 /arch/ia64
parent     410dc0aac63d1500faeabcbaecce4f4266380ed1 (diff)
download   linux-6c57a332901f851bd092aba7a2b4d8ef4e643829.tar.xz
[IA64] __per_cpu_idtrs[] is a memory hog
__per_cpu_idtrs is statically allocated ... on CONFIG_NR_CPUS=4096
systems it hogs 16MB of memory. This is way too much for a quite
probably unused facility (only KVM uses dynamic TR registers).
Change to an array of pointers, and allocate entries as needed on
a per cpu basis. Change the name too as the __per_cpu_ prefix is
confusing (this isn't a classic <linux/percpu.h> type object).
Signed-off-by: Tony Luck <tony.luck@intel.com>
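The 16MB figure follows from the old static sizing: struct ia64_tr_entry is four u64 fields (32 bytes), and with IA64_TR_ALLOC_MAX = 64 slots for each of the two TR classes (instruction and data), the array costs 4096 * 2 * 64 * 32 bytes = 16MB when CONFIG_NR_CPUS=4096. A minimal userspace C sketch of the before/after footprint and the first-use allocation the patch adopts follows; malloc/calloc stand in for kmalloc, the constants are assumed from the ia64 headers, and the helper name tr_table_for_cpu is hypothetical:

#include <stdio.h>
#include <stdlib.h>

/* Constants assumed from the ia64 headers, for illustration only. */
#define NR_CPUS            4096
#define IA64_TR_ALLOC_MAX  64

struct ia64_tr_entry {          /* four u64 fields: 32 bytes */
        unsigned long long ifa, itir, pte, rr;
};

/*
 * Before: 4096 * 2 * 64 * 32 bytes = 16MB reserved up front, used or not:
 *         struct ia64_tr_entry idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
 * After: one pointer per CPU; the backing store appears on first use.
 */
static struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/* Lazy first-use allocation, mirroring the ia64_itr_entry() hunk
 * (calloc stands in for kmalloc(..., GFP_KERNEL)). */
static int tr_table_for_cpu(int cpu)
{
        if (ia64_idtrs[cpu])
                return 0;
        ia64_idtrs[cpu] = calloc(2 * IA64_TR_ALLOC_MAX,
                                 sizeof(struct ia64_tr_entry));
        return ia64_idtrs[cpu] ? 0 : -1;    /* -ENOMEM in the kernel */
}

int main(void)
{
        printf("old static array:  %zu bytes\n", (size_t)NR_CPUS * 2 *
               IA64_TR_ALLOC_MAX * sizeof(struct ia64_tr_entry));
        printf("new pointer table: %zu bytes\n", sizeof(ia64_idtrs));
        if (tr_table_for_cpu(0) == 0)   /* only CPUs using dynamic TRs pay */
                printf("per-cpu table:     %zu bytes\n",
                       2 * IA64_TR_ALLOC_MAX * sizeof(struct ia64_tr_entry));
        return 0;
}

With CONFIG_NR_CPUS=4096 the pointer table is 32KB and each CPU that actually inserts a dynamic TR pays 4KB, so a system where only KVM on a few CPUs uses the facility drops from 16MB to a few tens of kilobytes.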
Diffstat (limited to 'arch/ia64')
-rw-r--r--   arch/ia64/include/asm/tlb.h |  2
-rw-r--r--   arch/ia64/kernel/mca.c      |  5
-rw-r--r--   arch/ia64/mm/tlb.c          | 32
3 files changed, 24 insertions(+), 15 deletions(-)
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 85d965cb19a0..23cce999eb1c 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -74,7 +74,7 @@ struct ia64_tr_entry {
 extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
 extern void ia64_ptr_entry(u64 target_mask, int slot);
 
-extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 
 /*
  * region register macros
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 32f2639e9b0a..378b4833024f 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1225,9 +1225,12 @@ static void mca_insert_tr(u64 iord)
 	unsigned long psr;
 	int cpu = smp_processor_id();
 
+	if (!ia64_idtrs[cpu])
+		return;
+
 	psr = ia64_clear_ic();
 	for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
-		p = &__per_cpu_idtrs[cpu][iord-1][i];
+		p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
 		if (p->pte & 0x1) {
 			old_rr = ia64_get_rr(p->ifa);
 			if (old_rr != p->rr) {
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index ee09d261f2e6..f3de9d7a98b4 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -48,7 +48,7 @@ DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
 DEFINE_PER_CPU(u8, ia64_tr_num);  /*Number of TR slots in current processor*/
 DEFINE_PER_CPU(u8, ia64_tr_used); /*Max Slot number used by kernel*/
 
-struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 
 /*
  * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
@@ -429,10 +429,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 	struct ia64_tr_entry *p;
 	int cpu = smp_processor_id();
 
+	if (!ia64_idtrs[cpu]) {
+		ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
+				sizeof (struct ia64_tr_entry), GFP_KERNEL);
+		if (!ia64_idtrs[cpu])
+			return -ENOMEM;
+	}
 	r = -EINVAL;
 	/*Check overlap with existing TR entries*/
 	if (target_mask & 0x1) {
-		p = &__per_cpu_idtrs[cpu][0][0];
+		p = ia64_idtrs[cpu];
 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
 								i++, p++) {
 			if (p->pte & 0x1)
@@ -444,7 +450,7 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 		}
 	}
 	if (target_mask & 0x2) {
-		p = &__per_cpu_idtrs[cpu][1][0];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
 								i++, p++) {
 			if (p->pte & 0x1)
@@ -459,16 +465,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
 		switch (target_mask & 0x3) {
 		case 1:
-			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
 				goto found;
 			continue;
 		case 2:
-			if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 				goto found;
 			continue;
 		case 3:
-			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
-			    !(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
+			    !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 				goto found;
 			continue;
 		default:
@@ -488,7 +494,7 @@ found:
 	if (target_mask & 0x1) {
 		ia64_itr(0x1, i, va, pte, log_size);
 		ia64_srlz_i();
-		p = &__per_cpu_idtrs[cpu][0][i];
+		p = ia64_idtrs[cpu] + i;
 		p->ifa = va;
 		p->pte = pte;
 		p->itir = log_size << 2;
@@ -497,7 +503,7 @@ found:
 	if (target_mask & 0x2) {
 		ia64_itr(0x2, i, va, pte, log_size);
 		ia64_srlz_i();
-		p = &__per_cpu_idtrs[cpu][1][i];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
 		p->ifa = va;
 		p->pte = pte;
 		p->itir = log_size << 2;
@@ -528,7 +534,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 		return;
 
 	if (target_mask & 0x1) {
-		p = &__per_cpu_idtrs[cpu][0][slot];
+		p = ia64_idtrs[cpu] + slot;
 		if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
 			p->pte = 0;
 			ia64_ptr(0x1, p->ifa, p->itir>>2);
@@ -537,7 +543,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 	}
 
 	if (target_mask & 0x2) {
-		p = &__per_cpu_idtrs[cpu][1][slot];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
 		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
 			p->pte = 0;
 			ia64_ptr(0x2, p->ifa, p->itir>>2);
@@ -546,8 +552,8 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 	}
 
 	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
-		if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
-		    (__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+		if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
+		    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 			break;
 	}
 	per_cpu(ia64_tr_used, cpu) = i;
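The recurring ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i arithmetic above is the old three-index lookup __per_cpu_idtrs[cpu][iord][slot] flattened by hand. A sketch of that mapping as a helper, assuming the declarations from the patch; tr_slot is hypothetical and not part of the commit:

/* Hypothetical helper, not in the patch: map (cpu, iord, slot) onto the
 * flat per-CPU buffer. iord is 0 for instruction TRs (mask 0x1) and 1 for
 * data TRs (mask 0x2); each class owns IA64_TR_ALLOC_MAX consecutive slots. */
static inline struct ia64_tr_entry *tr_slot(int cpu, int iord, int slot)
{
        return ia64_idtrs[cpu] + iord * IA64_TR_ALLOC_MAX + slot;
}

Centralizing the arithmetic would also make slips easier to spot: as written, the mca.c hunk computes p from (iord - 1) * IA64_TR_ALLOC_MAX without adding the loop index i, so every iteration of that loop inspects the same first slot.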