author | Vineet Gupta <vgupta@synopsys.com> | 2013-06-17 18:14:06 +0400 |
---|---|---|
committer | Vineet Gupta <vgupta@synopsys.com> | 2013-08-30 08:49:12 +0400 |
commit | d091fcb97ff48a5cb6de19ad0881fb2c8e76dbc0 (patch) | |
tree | c54e1ce880e399ed6a72c054f6cf244bc3752071 /arch/arc | |
parent | 64b703ef276964b160a5e88df0764f254460cafb (diff) | |
download | linux-d091fcb97ff48a5cb6de19ad0881fb2c8e76dbc0.tar.xz | |
ARC: MMUv4 preps/2 - Reshuffle PTE bits
With the previous commit freeing up PTE bits, reassign them so as to:
- Match the bits to their H/w counterparts where possible
  (e.g. MMUv2 GLOBAL/PRESENT; this avoids a shift in create_tlb(), see the sketch below)
- Avoid holes in _PAGE_xxx definitions
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
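
For illustration only (not part of the commit): a minimal user-space C sketch of what the bit re-alignment buys in create_tlb(). With the old MMUv2 layout the software GLOBAL/PRESENT flags sat one bit above their hardware PD0 slots and had to be shifted down before programming TLBPD0; with the new layout they can be OR'ed in as-is, which is what the reworked tlb.c/tlbex.S hunks below do. The HW_PD0_* positions used here are an assumption inferred from the old ">> 1" shift, not taken from the ARC documentation.

```c
/*
 * Illustration only, not kernel code: shows why the MMUv2 special case
 * in create_tlb() disappears once the s/w PTE bits match the h/w PD0
 * layout.  _PAGE_* values come from the pgtable.h hunk below; the
 * HW_PD0_* positions are an assumption inferred from the old ">> 1".
 */
#include <assert.h>
#include <stdio.h>

#define HW_PD0_GLOBAL		(1u << 8)	/* assumed h/w position */
#define HW_PD0_PRESENT		(1u << 10)	/* assumed h/w position */

/* old MMUv2 software layout (before this patch) */
#define OLD_PAGE_GLOBAL		(1u << 9)
#define OLD_PAGE_PRESENT	(1u << 11)

/* new software layout (after this patch) */
#define NEW_PAGE_GLOBAL		(1u << 8)
#define NEW_PAGE_PRESENT	(1u << 10)

int main(void)
{
	unsigned int hw = HW_PD0_GLOBAL | HW_PD0_PRESENT;

	/* Old: flags sit one bit above the h/w slots, so create_tlb()
	 * had to shift them down before writing ARC_REG_TLBPD0.      */
	unsigned int old_pd0 = (OLD_PAGE_GLOBAL | OLD_PAGE_PRESENT) >> 1;

	/* New: bit positions already match, so the PTE flag bits can
	 * be OR'ed into PD0 as-is (what the reworked tlb.c does).    */
	unsigned int new_pd0 = NEW_PAGE_GLOBAL | NEW_PAGE_PRESENT;

	assert(old_pd0 == hw && new_pd0 == hw);
	printf("PD0 flag bits: 0x%x\n", new_pd0);	/* 0x500 */
	return 0;
}
```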
Diffstat (limited to 'arch/arc')
-rw-r--r-- | arch/arc/include/asm/pgtable.h | 22 |
-rw-r--r-- | arch/arc/mm/tlb.c | 11 |
-rw-r--r-- | arch/arc/mm/tlbex.S | 3 |
3 files changed, 11 insertions, 25 deletions
```diff
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 99799c91ca00..6b0b7f7ef783 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -60,30 +60,24 @@
 #define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
 #define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
 #define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
-#define _PAGE_GLOBAL        (1<<9)	/* Page is global (H) */
-#define _PAGE_MODIFIED      (1<<10)	/* Page modified (dirty) (S) */
-#define _PAGE_FILE          (1<<10)	/* page cache/ swap (S) */
-#define _PAGE_PRESENT       (1<<11)	/* TLB entry is valid (H) */
+#define _PAGE_MODIFIED      (1<<6)	/* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<7)	/* page cache/ swap (S) */
+#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
+#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */
 
 #else	/* MMU v3 onwards */
 
-/* PD1 */
 #define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
 #define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
 #define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
 #define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
-#define _PAGE_ACCESSED      (1<<7)	/* Page is accessed (S) */
-
-/* PD0 */
+#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
+#define _PAGE_MODIFIED      (1<<5)	/* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<6)	/* page cache/ swap (S) */
 #define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
 #define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */
-#define _PAGE_SHARED_CODE   (1<<10)	/* Shared Code page with cmn vaddr
+#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
 					   usable for shared TLB entries (H) */
-
-#define _PAGE_MODIFIED      (1<<11)	/* Page modified (dirty) (S) */
-#define _PAGE_FILE          (1<<12)	/* page cache/ swap (S) */
-
-#define _PAGE_SHARED_CODE_H (1<<31)	/* Hardware counterpart of above */
 #endif
 
 /* vmalloc permissions */
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index f9908341e8a7..85a8716e6028 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -342,7 +342,6 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	unsigned long flags;
 	unsigned int idx, asid_or_sasid, rwx;
-	unsigned long pd0_flags;
 
 	/*
 	 * create_tlb() assumes that current->mm == vma->mm, since
@@ -381,17 +380,13 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 	/* update this PTE credentials */
 	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
 
-	/* Create HW TLB entry Flags (in PD0) from PTE Flags */
-#if (CONFIG_ARC_MMU_VER <= 2)
-	pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
-#else
-	pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
-#endif
+	/* Create HW TLB(PD0,PD1) from PTE */
 
 	/* ASID for this task */
 	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
 
-	write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);
+	write_aux_reg(ARC_REG_TLBPD0, address | asid_or_sasid |
+				      (pte_val(*ptep) & PTE_BITS_IN_PD0));
 
 	/*
 	 * ARC MMU provides fully orthogonal access bits for K/U mode,
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index ec382e59d681..50e83ca96b96 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -229,9 +229,6 @@ ex_saved_reg1:
 	sr  r3, [ARC_REG_TLBPD1]    	; these go in PD1
 
 	and r2, r0, PTE_BITS_IN_PD0 	; Extract other PTE flags: (V)alid, (G)lb
-#if (CONFIG_ARC_MMU_VER <= 2)	/* Neednot be done with v3 onwards */
-	lsr r2, r2			; shift PTE flags to match layout in PD0
-#endif
 
 	lr  r3,[ARC_REG_TLBPD0]     	; MMU prepares PD0 with vaddr and asid
 
```