Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/cache.S        |  28
-rw-r--r--  arch/arm64/mm/context.c      |  38
-rw-r--r--  arch/arm64/mm/copypage.c     |   3
-rw-r--r--  arch/arm64/mm/dma-mapping.c  |   4
-rw-r--r--  arch/arm64/mm/fault.c        |  28
-rw-r--r--  arch/arm64/mm/flush.c        |  33
-rw-r--r--  arch/arm64/mm/hugetlbpage.c  | 274
-rw-r--r--  arch/arm64/mm/init.c         |   9
-rw-r--r--  arch/arm64/mm/mmu.c          |  94
-rw-r--r--  arch/arm64/mm/pgd.c          |  12
-rw-r--r--  arch/arm64/mm/proc-macros.S  |  22
-rw-r--r--  arch/arm64/mm/proc.S         |   4

12 files changed, 415 insertions(+), 134 deletions(-)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index cfa44a6adc0a..6df07069a025 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -81,26 +81,32 @@ ENDPROC(__flush_cache_user_range)
/*
* __flush_dcache_area(kaddr, size)
*
- * Ensure that the data held in the page kaddr is written back to the
- * page in question.
+ * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * are cleaned and invalidated to the PoC.
*
* - kaddr - kernel address
* - size - size in question
*/
ENTRY(__flush_dcache_area)
- dcache_line_size x2, x3
- add x1, x0, x1
- sub x3, x2, #1
- bic x0, x0, x3
-1: dc civac, x0 // clean & invalidate D line / unified line
- add x0, x0, x2
- cmp x0, x1
- b.lo 1b
- dsb sy
+ dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
ENDPIPROC(__flush_dcache_area)
/*
+ * __clean_dcache_area_pou(kaddr, size)
+ *
+ * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * are cleaned to the PoU.
+ *
+ * - kaddr - kernel address
+ * - size - size in question
+ */
+ENTRY(__clean_dcache_area_pou)
+ dcache_by_line_op cvau, ish, x0, x1, x2, x3
+ ret
+ENDPROC(__clean_dcache_area_pou)
+
+/*
* __inval_cache_range(start, end)
* - start - start address of region
* - end - end address of region
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index f636a2639f03..e87f53ff5f58 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -76,13 +76,28 @@ static void flush_context(unsigned int cpu)
__flush_icache_all();
}
-static int is_reserved_asid(u64 asid)
+static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
int cpu;
- for_each_possible_cpu(cpu)
- if (per_cpu(reserved_asids, cpu) == asid)
- return 1;
- return 0;
+ bool hit = false;
+
+ /*
+ * Iterate over the set of reserved ASIDs looking for a match.
+ * If we find one, then we can update our mm to use newasid
+ * (i.e. the same ASID in the current generation) but we can't
+ * exit the loop early, since we need to ensure that all copies
+ * of the old ASID are updated to reflect the mm. Failure to do
+ * so could result in us missing the reserved ASID in a future
+ * generation.
+ */
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(reserved_asids, cpu) == asid) {
+ hit = true;
+ per_cpu(reserved_asids, cpu) = newasid;
+ }
+ }
+
+ return hit;
}
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -92,12 +107,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
u64 generation = atomic64_read(&asid_generation);
if (asid != 0) {
+ u64 newasid = generation | (asid & ~ASID_MASK);
+
/*
* If our current ASID was active during a rollover, we
* can continue to use it and this was just a false alarm.
*/
- if (is_reserved_asid(asid))
- return generation | (asid & ~ASID_MASK);
+ if (check_update_reserved_asid(asid, newasid))
+ return newasid;
/*
* We had a valid ASID in a previous life, so try to re-use
@@ -105,7 +122,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
*/
asid &= ~ASID_MASK;
if (!__test_and_set_bit(asid, asid_map))
- goto bump_gen;
+ return newasid;
}
/*
@@ -129,10 +146,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
set_asid:
__set_bit(asid, asid_map);
cur_idx = asid;
-
-bump_gen:
- asid |= generation;
- return asid;
+ return asid | generation;
}
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
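
The new_context() change above hinges on packing the allocator generation into the upper bits of the context ID and the hardware ASID into the lower bits. A minimal user-space sketch of that packing (the 16-bit ASID width and the values are assumptions, not the kernel's definitions):

    /* Simplified model of the generation|ASID packing; illustrative only. */
    #include <stdint.h>
    #include <stdio.h>

    #define ASID_BITS  16
    #define ASID_MASK  (~((1ULL << ASID_BITS) - 1))    /* generation (upper) bits */

    int main(void)
    {
        uint64_t generation = 3ULL << ASID_BITS;        /* current generation */
        uint64_t old_ctx    = (2ULL << ASID_BITS) | 42; /* stale generation, ASID 42 */

        /* Same composition as new_context(): keep the hardware ASID,
         * stamp it with the current generation. */
        uint64_t newasid = generation | (old_ctx & ~ASID_MASK);

        printf("old=%#llx new=%#llx asid=%llu\n",
               (unsigned long long)old_ctx, (unsigned long long)newasid,
               (unsigned long long)(newasid & ~ASID_MASK));
        return 0;
    }
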
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 13bbc3be6f5a..22e4cb4d6f53 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -24,8 +24,9 @@
void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
+ struct page *page = virt_to_page(kto);
copy_page(kto, kfrom);
- __flush_dcache_area(kto, PAGE_SIZE);
+ flush_dcache_page(page);
}
EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 7963aa4b5d28..331c4ca6205c 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -40,7 +40,7 @@ static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
static struct gen_pool *atomic_pool;
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
-static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
+static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
static int __init early_coherent_pool(char *p)
{
@@ -896,7 +896,7 @@ static int __iommu_attach_notifier(struct notifier_block *nb,
return 0;
}
-static int register_iommu_dma_ops_notifier(struct bus_type *bus)
+static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
{
struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
int ret;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 19211c4a8911..92ddac1e8ca2 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -393,16 +393,16 @@ static struct fault_info {
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
- { do_bad, SIGBUS, 0, "reserved access flag fault" },
+ { do_bad, SIGBUS, 0, "unknown 8" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
- { do_bad, SIGBUS, 0, "reserved permission fault" },
+ { do_bad, SIGBUS, 0, "unknown 12" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
{ do_bad, SIGBUS, 0, "synchronous external abort" },
- { do_bad, SIGBUS, 0, "asynchronous external abort" },
+ { do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
@@ -410,16 +410,16 @@ static struct fault_info {
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous parity error" },
- { do_bad, SIGBUS, 0, "asynchronous parity error" },
+ { do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
- { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
- { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
- { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
- { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
{ do_bad, SIGBUS, 0, "unknown 32" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment fault" },
- { do_bad, SIGBUS, 0, "debug event" },
+ { do_bad, SIGBUS, 0, "unknown 34" },
{ do_bad, SIGBUS, 0, "unknown 35" },
{ do_bad, SIGBUS, 0, "unknown 36" },
{ do_bad, SIGBUS, 0, "unknown 37" },
@@ -433,21 +433,21 @@ static struct fault_info {
{ do_bad, SIGBUS, 0, "unknown 45" },
{ do_bad, SIGBUS, 0, "unknown 46" },
{ do_bad, SIGBUS, 0, "unknown 47" },
- { do_bad, SIGBUS, 0, "unknown 48" },
+ { do_bad, SIGBUS, 0, "TLB conflict abort" },
{ do_bad, SIGBUS, 0, "unknown 49" },
{ do_bad, SIGBUS, 0, "unknown 50" },
{ do_bad, SIGBUS, 0, "unknown 51" },
{ do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
- { do_bad, SIGBUS, 0, "unknown 53" },
+ { do_bad, SIGBUS, 0, "implementation fault (unsupported exclusive)" },
{ do_bad, SIGBUS, 0, "unknown 54" },
{ do_bad, SIGBUS, 0, "unknown 55" },
{ do_bad, SIGBUS, 0, "unknown 56" },
{ do_bad, SIGBUS, 0, "unknown 57" },
- { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" },
+ { do_bad, SIGBUS, 0, "unknown 58" },
{ do_bad, SIGBUS, 0, "unknown 59" },
{ do_bad, SIGBUS, 0, "unknown 60" },
- { do_bad, SIGBUS, 0, "unknown 61" },
- { do_bad, SIGBUS, 0, "unknown 62" },
+ { do_bad, SIGBUS, 0, "section domain fault" },
+ { do_bad, SIGBUS, 0, "page domain fault" },
{ do_bad, SIGBUS, 0, "unknown 63" },
};
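
The renamed entries above follow the architected fault status codes: the fault handler indexes this table with the low six bits of the ESR, which is why it has exactly 64 slots. A minimal sketch of that lookup (the struct, the sparse initializers and the example ESR value are assumptions, not the kernel's code):

    /* Toy lookup keyed by the low six bits of an ESR value. */
    #include <stdio.h>

    struct fault_info {
        const char *name;
    };

    static const struct fault_info fault_info[64] = {
        [0x10] = { "synchronous external abort" },
        [0x21] = { "alignment fault" },
        [0x30] = { "TLB conflict abort" },
    };

    int main(void)
    {
        unsigned long esr = 0x96000021UL;   /* made-up data abort ESR */
        const struct fault_info *inf = &fault_info[esr & 63];

        printf("fsc=%#lx -> %s\n", esr & 63,
               inf->name ? inf->name : "unknown");
        return 0;
    }
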
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index c26b804015e8..46649d6e6c5a 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -34,19 +34,24 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
__flush_icache_all();
}
+static void sync_icache_aliases(void *kaddr, unsigned long len)
+{
+ unsigned long addr = (unsigned long)kaddr;
+
+ if (icache_is_aliasing()) {
+ __clean_dcache_area_pou(kaddr, len);
+ __flush_icache_all();
+ } else {
+ flush_icache_range(addr, addr + len);
+ }
+}
+
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *kaddr,
unsigned long len)
{
- if (vma->vm_flags & VM_EXEC) {
- unsigned long addr = (unsigned long)kaddr;
- if (icache_is_aliasing()) {
- __flush_dcache_area(kaddr, len);
- __flush_icache_all();
- } else {
- flush_icache_range(addr, addr + len);
- }
- }
+ if (vma->vm_flags & VM_EXEC)
+ sync_icache_aliases(kaddr, len);
}
/*
@@ -74,13 +79,11 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
if (!page_mapping(page))
return;
- if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
- __flush_dcache_area(page_address(page),
- PAGE_SIZE << compound_order(page));
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+ sync_icache_aliases(page_address(page),
+ PAGE_SIZE << compound_order(page));
+ else if (icache_is_aivivt())
__flush_icache_all();
- } else if (icache_is_aivivt()) {
- __flush_icache_all();
- }
}
/*
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 383b03ff38f8..82d607c3614e 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -41,17 +41,289 @@ int pud_huge(pud_t pud)
#endif
}
+static int find_num_contig(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, size_t *pgsize)
+{
+ pgd_t *pgd = pgd_offset(mm, addr);
+ pud_t *pud;
+ pmd_t *pmd;
+
+ *pgsize = PAGE_SIZE;
+ if (!pte_cont(pte))
+ return 1;
+ if (!pgd_present(*pgd)) {
+ VM_BUG_ON(!pgd_present(*pgd));
+ return 1;
+ }
+ pud = pud_offset(pgd, addr);
+ if (!pud_present(*pud)) {
+ VM_BUG_ON(!pud_present(*pud));
+ return 1;
+ }
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd)) {
+ VM_BUG_ON(!pmd_present(*pmd));
+ return 1;
+ }
+ if ((pte_t *)pmd == ptep) {
+ *pgsize = PMD_SIZE;
+ return CONT_PMDS;
+ }
+ return CONT_PTES;
+}
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ size_t pgsize;
+ int i;
+ int ncontig = find_num_contig(mm, addr, ptep, pte, &pgsize);
+ unsigned long pfn;
+ pgprot_t hugeprot;
+
+ if (ncontig == 1) {
+ set_pte_at(mm, addr, ptep, pte);
+ return;
+ }
+
+ pfn = pte_pfn(pte);
+ hugeprot = __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
+ for (i = 0; i < ncontig; i++) {
+ pr_debug("%s: set pte %p to 0x%llx\n", __func__, ptep,
+ pte_val(pfn_pte(pfn, hugeprot)));
+ set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
+ ptep++;
+ pfn += pgsize >> PAGE_SHIFT;
+ addr += pgsize;
+ }
+}
+
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pte_t *pte = NULL;
+
+ pr_debug("%s: addr:0x%lx sz:0x%lx\n", __func__, addr, sz);
+ pgd = pgd_offset(mm, addr);
+ pud = pud_alloc(mm, pgd, addr);
+ if (!pud)
+ return NULL;
+
+ if (sz == PUD_SIZE) {
+ pte = (pte_t *)pud;
+ } else if (sz == (PAGE_SIZE * CONT_PTES)) {
+ pmd_t *pmd = pmd_alloc(mm, pud, addr);
+
+ WARN_ON(addr & (sz - 1));
+ /*
+ * Note that if this code were ever ported to the
+ * 32-bit arm platform then it will cause trouble in
+ * the case where CONFIG_HIGHPTE is set, since there
+ * will be no pte_unmap() to correspond with this
+ * pte_alloc_map().
+ */
+ pte = pte_alloc_map(mm, NULL, pmd, addr);
+ } else if (sz == PMD_SIZE) {
+ if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
+ pud_none(*pud))
+ pte = huge_pmd_share(mm, addr, pud);
+ else
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ } else if (sz == (PMD_SIZE * CONT_PMDS)) {
+ pmd_t *pmd;
+
+ pmd = pmd_alloc(mm, pud, addr);
+ WARN_ON(addr & (sz - 1));
+ return (pte_t *)pmd;
+ }
+
+ pr_debug("%s: addr:0x%lx sz:0x%lx ret pte=%p/0x%llx\n", __func__, addr,
+ sz, pte, pte_val(*pte));
+ return pte;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd = NULL;
+ pte_t *pte = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd);
+ if (!pgd_present(*pgd))
+ return NULL;
+ pud = pud_offset(pgd, addr);
+ if (!pud_present(*pud))
+ return NULL;
+
+ if (pud_huge(*pud))
+ return (pte_t *)pud;
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd))
+ return NULL;
+
+ if (pte_cont(pmd_pte(*pmd))) {
+ pmd = pmd_offset(
+ pud, (addr & CONT_PMD_MASK));
+ return (pte_t *)pmd;
+ }
+ if (pmd_huge(*pmd))
+ return (pte_t *)pmd;
+ pte = pte_offset_kernel(pmd, addr);
+ if (pte_present(*pte) && pte_cont(*pte)) {
+ pte = pte_offset_kernel(
+ pmd, (addr & CONT_PTE_MASK));
+ return pte;
+ }
+ return NULL;
+}
+
+pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+ struct page *page, int writable)
+{
+ size_t pagesize = huge_page_size(hstate_vma(vma));
+
+ if (pagesize == CONT_PTE_SIZE) {
+ entry = pte_mkcont(entry);
+ } else if (pagesize == CONT_PMD_SIZE) {
+ entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
+ } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
+ pr_warn("%s: unrecognized huge page size 0x%lx\n",
+ __func__, pagesize);
+ }
+ return entry;
+}
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t pte;
+
+ if (pte_cont(*ptep)) {
+ int ncontig, i;
+ size_t pgsize;
+ pte_t *cpte;
+ bool is_dirty = false;
+
+ cpte = huge_pte_offset(mm, addr);
+ ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
+ /* save the 1st pte to return */
+ pte = ptep_get_and_clear(mm, addr, cpte);
+ for (i = 1; i < ncontig; ++i) {
+ /*
+ * If HW_AFDBM is enabled, then the HW could
+ * turn on the dirty bit for any of the page
+ * in the set, so check them all.
+ */
+ ++cpte;
+ if (pte_dirty(ptep_get_and_clear(mm, addr, cpte)))
+ is_dirty = true;
+ }
+ if (is_dirty)
+ return pte_mkdirty(pte);
+ else
+ return pte;
+ } else {
+ return ptep_get_and_clear(mm, addr, ptep);
+ }
+}
+
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ pte_t *cpte;
+
+ if (pte_cont(pte)) {
+ int ncontig, i, changed = 0;
+ size_t pgsize = 0;
+ unsigned long pfn = pte_pfn(pte);
+ /* Select all bits except the pfn */
+ pgprot_t hugeprot =
+ __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^
+ pte_val(pte));
+
+ cpte = huge_pte_offset(vma->vm_mm, addr);
+ pfn = pte_pfn(*cpte);
+ ncontig = find_num_contig(vma->vm_mm, addr, cpte,
+ *cpte, &pgsize);
+ for (i = 0; i < ncontig; ++i, ++cpte) {
+ changed = ptep_set_access_flags(vma, addr, cpte,
+ pfn_pte(pfn,
+ hugeprot),
+ dirty);
+ pfn += pgsize >> PAGE_SHIFT;
+ }
+ return changed;
+ } else {
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+ }
+}
+
+void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ if (pte_cont(*ptep)) {
+ int ncontig, i;
+ pte_t *cpte;
+ size_t pgsize = 0;
+
+ cpte = huge_pte_offset(mm, addr);
+ ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
+ for (i = 0; i < ncontig; ++i, ++cpte)
+ ptep_set_wrprotect(mm, addr, cpte);
+ } else {
+ ptep_set_wrprotect(mm, addr, ptep);
+ }
+}
+
+void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ if (pte_cont(*ptep)) {
+ int ncontig, i;
+ pte_t *cpte;
+ size_t pgsize = 0;
+
+ cpte = huge_pte_offset(vma->vm_mm, addr);
+ ncontig = find_num_contig(vma->vm_mm, addr, cpte,
+ *cpte, &pgsize);
+ for (i = 0; i < ncontig; ++i, ++cpte)
+ ptep_clear_flush(vma, addr, cpte);
+ } else {
+ ptep_clear_flush(vma, addr, ptep);
+ }
+}
+
static __init int setup_hugepagesz(char *opt)
{
unsigned long ps = memparse(opt, &opt);
+
if (ps == PMD_SIZE) {
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
} else if (ps == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+ } else if (ps == (PAGE_SIZE * CONT_PTES)) {
+ hugetlb_add_hstate(CONT_PTE_SHIFT);
+ } else if (ps == (PMD_SIZE * CONT_PMDS)) {
+ hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
} else {
- pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20);
+ pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
return 0;
}
return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
+
+#ifdef CONFIG_ARM64_64K_PAGES
+static __init int add_default_hugepagesz(void)
+{
+ if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
+ hugetlb_add_hstate(CONT_PMD_SHIFT);
+ return 0;
+}
+arch_initcall(add_default_hugepagesz);
+#endif
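
Several of the helpers added above recover the protection bits by XOR-ing a pte against a pte built from its pfn alone, so that only the non-pfn bits survive. A small user-space model of that trick (the bit layout and the values are illustrative assumptions):

    /* Model of "select all bits except the pfn"; pfn field assumed at bits 12..47. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PFN_MASK   0x0000fffffffff000ULL

    static uint64_t pfn_pte(uint64_t pfn, uint64_t prot)
    {
        return (pfn << PAGE_SHIFT) | prot;
    }

    int main(void)
    {
        uint64_t pte  = pfn_pte(0x1234, 0x0040000000000713ULL); /* pfn + attribute bits */
        uint64_t pfn  = (pte & PFN_MASK) >> PAGE_SHIFT;

        /* XOR against a pte carrying only the pfn: the pfn cancels out
         * and the protection bits remain. */
        uint64_t prot = pfn_pte(pfn, 0) ^ pte;

        printf("pte=%#llx pfn=%#llx prot=%#llx\n",
               (unsigned long long)pte, (unsigned long long)pfn,
               (unsigned long long)prot);
        return 0;
    }
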
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 17bf39ac83ba..f3b061e67bfe 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -71,7 +71,7 @@ early_param("initrd", early_initrd);
* currently assumes that for memory starting above 4G, 32-bit devices will
* use a DMA offset.
*/
-static phys_addr_t max_zone_dma_phys(void)
+static phys_addr_t __init max_zone_dma_phys(void)
{
phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
return min(offset + (1ULL << 32), memblock_end_of_DRAM());
@@ -120,17 +120,17 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
- return memblock_is_memory(pfn << PAGE_SHIFT);
+ return memblock_is_map_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif
#ifndef CONFIG_SPARSEMEM
-static void arm64_memory_present(void)
+static void __init arm64_memory_present(void)
{
}
#else
-static void arm64_memory_present(void)
+static void __init arm64_memory_present(void)
{
struct memblock_region *reg;
@@ -360,7 +360,6 @@ void free_initmem(void)
{
fixup_init();
free_initmem_default(0);
- free_alternatives_memory();
}
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index abb66f84d4ac..58faeaa7fbdc 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -64,8 +64,12 @@ EXPORT_SYMBOL(phys_mem_access_prot);
static void __init *early_alloc(unsigned long sz)
{
- void *ptr = __va(memblock_alloc(sz, sz));
- BUG_ON(!ptr);
+ phys_addr_t phys;
+ void *ptr;
+
+ phys = memblock_alloc(sz, sz);
+ BUG_ON(!phys);
+ ptr = __va(phys);
memset(ptr, 0, sz);
return ptr;
}
@@ -81,55 +85,19 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
do {
/*
* Need to have the least restrictive permissions available
- * permissions will be fixed up later. Default the new page
- * range as contiguous ptes.
+ * permissions will be fixed up later
*/
- set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT));
+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
pfn++;
} while (pte++, i++, i < PTRS_PER_PTE);
}
-/*
- * Given a PTE with the CONT bit set, determine where the CONT range
- * starts, and clear the entire range of PTE CONT bits.
- */
-static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
-{
- int i;
-
- pte -= CONT_RANGE_OFFSET(addr);
- for (i = 0; i < CONT_PTES; i++) {
- set_pte(pte, pte_mknoncont(*pte));
- pte++;
- }
- flush_tlb_all();
-}
-
-/*
- * Given a range of PTEs set the pfn and provided page protection flags
- */
-static void __populate_init_pte(pte_t *pte, unsigned long addr,
- unsigned long end, phys_addr_t phys,
- pgprot_t prot)
-{
- unsigned long pfn = __phys_to_pfn(phys);
-
- do {
- /* clear all the bits except the pfn, then apply the prot */
- set_pte(pte, pfn_pte(pfn, prot));
- pte++;
- pfn++;
- addr += PAGE_SIZE;
- } while (addr != end);
-}
-
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys,
+ unsigned long end, unsigned long pfn,
pgprot_t prot,
void *(*alloc)(unsigned long size))
{
pte_t *pte;
- unsigned long next;
if (pmd_none(*pmd) || pmd_sect(*pmd)) {
pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
@@ -142,27 +110,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
pte = pte_offset_kernel(pmd, addr);
do {
- next = min(end, (addr + CONT_SIZE) & CONT_MASK);
- if (((addr | next | phys) & ~CONT_MASK) == 0) {
- /* a block of CONT_PTES */
- __populate_init_pte(pte, addr, next, phys,
- __pgprot(pgprot_val(prot) | PTE_CONT));
- } else {
- /*
- * If the range being split is already inside of a
- * contiguous range but this PTE isn't going to be
- * contiguous, then we want to unmark the adjacent
- * ranges, then update the portion of the range we
- * are interrested in.
- */
- clear_cont_pte_range(pte, addr);
- __populate_init_pte(pte, addr, next, phys, prot);
- }
-
- pte += (next - addr) >> PAGE_SHIFT;
- phys += next - addr;
- addr = next;
- } while (addr != end);
+ set_pte(pte, pfn_pte(pfn, prot));
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
}
static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -223,7 +173,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
}
}
} else {
- alloc_init_pte(pmd, addr, next, phys, prot, alloc);
+ alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
+ prot, alloc);
}
phys += next - addr;
} while (pmd++, addr = next, addr != end);
@@ -300,6 +251,14 @@ static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
{
unsigned long addr, length, end, next;
+ /*
+ * If the virtual and physical address don't have the same offset
+ * within a page, we cannot map the region as the caller expects.
+ */
+ if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
+ return;
+
+ phys &= PAGE_MASK;
addr = virt & PAGE_MASK;
length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
@@ -329,7 +288,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
&phys, virt);
return;
}
- __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
+ __create_mapping(&init_mm, pgd_offset_k(virt), phys, virt,
size, prot, early_alloc);
}
@@ -350,7 +309,7 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
return;
}
- return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
+ return __create_mapping(&init_mm, pgd_offset_k(virt),
phys, virt, size, prot, late_alloc);
}
@@ -421,6 +380,8 @@ static void __init map_mem(void)
if (start >= end)
break;
+ if (memblock_is_nomap(reg))
+ continue;
if (ARM64_SWAPPER_USES_SECTION_MAPS) {
/*
@@ -505,6 +466,9 @@ void __init paging_init(void)
empty_zero_page = virt_to_page(zero_page);
+ /* Ensure the zero page is visible to the page table walker */
+ dsb(ishst);
+
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
* point to zero page to avoid speculatively fetching new entries.
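
The WARN_ON added to __create_mapping() rejects requests whose physical and virtual addresses sit at different offsets within a page, since such a region cannot be mapped with page-granular entries. A quick user-space rendering of that check (page size and addresses are made-up examples):

    /* Same-offset-within-a-page test, assuming 4K pages. */
    #include <stdio.h>

    #define PAGE_MASK (~0xfffUL)

    static int offsets_match(unsigned long phys, unsigned long virt)
    {
        /* Any set bit below PAGE_MASK means the two addresses disagree
         * on their offset inside the page. */
        return ((phys ^ virt) & ~PAGE_MASK) == 0;
    }

    int main(void)
    {
        printf("%d\n", offsets_match(0x80000123UL, 0xffff000000000123UL)); /* 1 */
        printf("%d\n", offsets_match(0x80000123UL, 0xffff000000000456UL)); /* 0 */
        return 0;
    }
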
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index cb3ba1b812e7..ae11d4e03d0e 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -46,14 +46,14 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
kmem_cache_free(pgd_cache, pgd);
}
-static int __init pgd_cache_init(void)
+void __init pgd_cache_init(void)
{
+ if (PGD_SIZE == PAGE_SIZE)
+ return;
+
/*
* Naturally aligned pgds required by the architecture.
*/
- if (PGD_SIZE != PAGE_SIZE)
- pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
- SLAB_PANIC, NULL);
- return 0;
+ pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+ SLAB_PANIC, NULL);
}
-core_initcall(pgd_cache_init);
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 4c4d93c4bf65..146bd99a7532 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -62,3 +62,25 @@
bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
.endm
+
+/*
+ * Macro to perform a data cache maintenance for the interval
+ * [kaddr, kaddr + size)
+ *
+ * op: operation passed to dc instruction
+ * domain: domain used in dsb instruction
+ * kaddr: starting virtual address of the region
+ * size: size of the region
+ * Corrupts: kaddr, size, tmp1, tmp2
+ */
+ .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+ dcache_line_size \tmp1, \tmp2
+ add \size, \kaddr, \size
+ sub \tmp2, \tmp1, #1
+ bic \kaddr, \kaddr, \tmp2
+9998: dc \op, \kaddr
+ add \kaddr, \kaddr, \tmp1
+ cmp \kaddr, \size
+ b.lo 9998b
+ dsb \domain
+ .endm
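
For readers following the assembly, the loop in dcache_by_line_op reduces to the C shape below: round the start address down to a cache-line boundary, issue one maintenance operation per line, and stop once past the end of the region (line size and addresses are assumptions):

    /* C rendering of the dcache_by_line_op iteration pattern. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long line  = 64;           /* assumed D-cache line size */
        unsigned long kaddr = 0x1010;       /* example region start */
        unsigned long size  = 0x70;         /* example region size */
        unsigned long end   = kaddr + size; /* add size, kaddr, size */

        kaddr &= ~(line - 1);               /* bic kaddr, kaddr, tmp2 */
        do {
            printf("dc <op>, %#lx\n", kaddr);   /* one op per cache line */
            kaddr += line;
        } while (kaddr < end);              /* cmp kaddr, size; b.lo */
        /* a dsb <domain> barrier would follow here */
        return 0;
    }
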
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index cacecc4ad3e5..a3d867e723b4 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -117,6 +117,7 @@ ENTRY(cpu_do_resume)
*/
ubfx x11, x11, #1, #1
msr oslar_el1, x11
+ msr pmuserenr_el0, xzr // Disable PMU access from EL0
mov x0, x12
dsb nsh // Make sure local tlb invalidation completed
isb
@@ -139,8 +140,6 @@ ENTRY(cpu_do_switch_mm)
ret
ENDPROC(cpu_do_switch_mm)
- .section ".text.init", #alloc, #execinstr
-
/*
* __cpu_setup
*
@@ -155,6 +154,7 @@ ENTRY(__cpu_setup)
msr cpacr_el1, x0 // Enable FP/ASIMD
mov x0, #1 << 12 // Reset mdscr_el1 and disable
msr mdscr_el1, x0 // access to the DCC from EL0
+ msr pmuserenr_el0, xzr // Disable PMU access from EL0
/*
* Memory region attributes for LPAE:
*