path: root/arch/sparc64/mm
author     David S. Miller <davem@sunset.davemloft.net>  2006-02-03 03:16:24 +0300
committer  David S. Miller <davem@sunset.davemloft.net>  2006-03-20 12:11:34 +0300
commit     f4e841da30b4bcbb8f1cc20a01157a788ff58b21 (patch)
tree       8f145f6902b694402ce6291a493caf3a2348717e /arch/sparc64/mm
parent     7bec08e38a7d0f088994f6eec9b6374652ea71fb (diff)
download   linux-f4e841da30b4bcbb8f1cc20a01157a788ff58b21.tar.xz
[SPARC64]: Turn off TSB growing for now.
There are several tricky races involved with growing the TSB.  So just use base-size TSBs for user contexts and we can revisit enabling this later.

One part of the SMP problems is that tsb_context_switch() can see partially updated TSB configuration state if tsb_grow() is running in parallel.  That's easily solved with a seqlock taken as a writer by tsb_grow() and taken as a reader to capture all the TSB config state in tsb_context_switch().

Then there is flush_tsb_user() running in parallel with a tsb_grow().  In theory we could take the seqlock as a reader there too, and just resample the TSB pointer and reflush, but that looks really ugly.

Lastly, I believe there is a case with threads that results in a TSB entry lock bit being set spuriously, which will cause the next access to that TSB entry to wedge the cpu (since the TSB entry lock bit will never clear).  It's either copy_tsb() or some bug elsewhere in the TSB assembly.

Signed-off-by: David S. Miller <davem@davemloft.net>
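As a minimal sketch of the seqlock pattern the message describes (not code from this commit, which disables TSB growing instead): struct tsb_config, publish_new_tsb() and snapshot_tsb_config() below are hypothetical stand-ins for the real TSB state kept in mm->context; only the seqlock primitives (DEFINE_SEQLOCK, write_seqlock, read_seqbegin, read_seqretry) are the actual kernel API.

	#include <linux/seqlock.h>

	/* Hypothetical snapshot of the TSB configuration that
	 * tsb_context_switch() would need to observe atomically.
	 */
	struct tsb_config {
		unsigned long	tsb_reg_val;
		unsigned long	tsb_nentries;
	};

	static DEFINE_SEQLOCK(tsb_cfg_lock);
	static struct tsb_config tsb_cfg;

	/* Writer side: a grow path would publish the new TSB state
	 * under the seqlock so readers never see a partial update.
	 */
	static void publish_new_tsb(unsigned long reg_val, unsigned long nentries)
	{
		write_seqlock(&tsb_cfg_lock);
		tsb_cfg.tsb_reg_val  = reg_val;
		tsb_cfg.tsb_nentries = nentries;
		write_sequnlock(&tsb_cfg_lock);
	}

	/* Reader side: retry until a consistent snapshot of all the
	 * TSB config fields has been captured.
	 */
	static struct tsb_config snapshot_tsb_config(void)
	{
		struct tsb_config snap;
		unsigned int seq;

		do {
			seq = read_seqbegin(&tsb_cfg_lock);
			snap = tsb_cfg;
		} while (read_seqretry(&tsb_cfg_lock, seq));

		return snap;
	}

flush_tsb_user() could in principle run the same read_seqbegin()/read_seqretry() loop to resample the TSB pointer and reflush, which is the alternative the message rejects as ugly.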
Diffstat (limited to 'arch/sparc64/mm')
-rw-r--r--  arch/sparc64/mm/init.c    5
-rw-r--r--  arch/sparc64/mm/tsb.c    11
2 files changed, 1 insertion, 15 deletions
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 4893f3e2c336..1af63307b24f 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -261,7 +261,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	struct page *page;
 	unsigned long pfn;
 	unsigned long pg_flags;
-	unsigned long mm_rss;
 
 	pfn = pte_pfn(pte);
 	if (pfn_valid(pfn) &&
@@ -285,10 +284,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	}
 
 	mm = vma->vm_mm;
-	mm_rss = get_mm_rss(mm);
-	if (mm_rss >= mm->context.tsb_rss_limit)
-		tsb_grow(mm, mm_rss, GFP_ATOMIC);
-
 	if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) {
 		struct tsb *tsb;
 		unsigned long tag;
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 787533f01049..2cc8e6528c63 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -310,7 +310,6 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-	unsigned long initial_rss;
 
 	mm->context.sparc64_ctx_val = 0UL;
 
@@ -319,15 +318,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	 * will be confused and think there is an older TSB to free up.
 	 */
 	mm->context.tsb = NULL;
-
-	/* If this is fork, inherit the parent's TSB size.  We would
-	 * grow it to that size on the first page fault anyways.
-	 */
-	initial_rss = mm->context.tsb_nentries;
-	if (initial_rss)
-		initial_rss -= 1;
-
-	tsb_grow(mm, initial_rss, GFP_KERNEL);
+	tsb_grow(mm, 0, GFP_KERNEL);
 
 	if (unlikely(!mm->context.tsb))
 		return -ENOMEM;