author		Ingo Molnar <mingo@kernel.org>	2015-10-14 17:05:18 +0300
committer	Ingo Molnar <mingo@kernel.org>	2015-10-14 17:05:18 +0300
commit		c7d77a7980e434c3af17de19e3348157f9b9ccce (patch)
tree		b32c5988ce8239b80c83e94c22d68f5eb0fb84da /arch/arm64/mm
parent		0ce423b6492a02be11662bfaa837dd16945aad3e (diff)
parent		8a53554e12e98d1759205afd7b8e9e2ea0936f48 (diff)
Merge branch 'x86/urgent' into core/efi, to pick up a pending EFI fix
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--	arch/arm64/mm/cache.S		 7
-rw-r--r--	arch/arm64/mm/context.c		16
-rw-r--r--	arch/arm64/mm/dma-mapping.c	35
-rw-r--r--	arch/arm64/mm/fault.c		29
-rw-r--r--	arch/arm64/mm/flush.c		 4
-rw-r--r--	arch/arm64/mm/hugetlbpage.c	 4
-rw-r--r--	arch/arm64/mm/init.c		 4
-rw-r--r--	arch/arm64/mm/mmu.c		13
-rw-r--r--	arch/arm64/mm/proc.S		21
9 files changed, 72 insertions, 61 deletions
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index bdeb5d38c2dd..eb48d5df4a0f 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -143,7 +143,12 @@ __dma_clean_range:
dcache_line_size x2, x3
sub x3, x2, #1
bic x0, x0, x3
-1: alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE
+1:
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+ dc cvac, x0
+alternative_else
+ dc civac, x0
+alternative_endif
add x0, x0, x2
cmp x0, x1
b.lo 1b
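
[Editor's note] The hunk above replaces the one-line alternative_insn macro with the block-style alternative_if_not/alternative_else/alternative_endif form; at boot, CPUs with the ARM64_WORKAROUND_CLEAN_CACHE erratum get the "dc civac" variant patched in. A minimal user-space C model of the resulting clean-by-line loop (the 64-byte line size and the printf stand-in for the cache op are assumptions for illustration only):

#include <stdint.h>
#include <stdio.h>

/* Model of __dma_clean_range: align the start down to a cache line,
 * then walk the range one line at a time, as the loop above does with
 * "dc cvac" (or "dc civac" on affected CPUs).
 */
static void dma_clean_range_model(uintptr_t start, uintptr_t end)
{
	const uintptr_t line = 64;		/* assumed D-cache line size */
	uintptr_t addr = start & ~(line - 1);	/* bic x0, x0, x3 */

	for (; addr < end; addr += line)	/* cmp x0, x1; b.lo 1b */
		printf("dc cvac, %#jx\n", (uintmax_t)addr);
}

int main(void)
{
	dma_clean_range_model(0x1005, 0x1100);
	return 0;
}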
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 76c1e6cd36fc..d70ff14dbdbd 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -53,8 +53,6 @@ static void flush_context(void)
__flush_icache_all();
}
-#ifdef CONFIG_SMP
-
static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
unsigned long flags;
@@ -110,23 +108,12 @@ static void reset_context(void *info)
cpu_switch_mm(mm->pgd, mm);
}
-#else
-
-static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
-{
- mm->context.id = asid;
- cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-}
-
-#endif
-
void __new_context(struct mm_struct *mm)
{
unsigned int asid;
unsigned int bits = asid_bits();
raw_spin_lock(&cpu_asid_lock);
-#ifdef CONFIG_SMP
/*
* Check the ASID again, in case the change was broadcast from another
* CPU before we acquired the lock.
@@ -136,7 +123,6 @@ void __new_context(struct mm_struct *mm)
raw_spin_unlock(&cpu_asid_lock);
return;
}
-#endif
/*
* At this point, it is guaranteed that the current mm (with an old
* ASID) isn't active on any other CPU since the ASIDs are changed
@@ -155,10 +141,8 @@ void __new_context(struct mm_struct *mm)
cpu_last_asid = ASID_FIRST_VERSION;
asid = cpu_last_asid + smp_processor_id();
flush_context();
-#ifdef CONFIG_SMP
smp_wmb();
smp_call_function(reset_context, NULL, 1);
-#endif
cpu_last_asid += NR_CPUS - 1;
}
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d16a1cead23f..99224dcebdc5 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -100,7 +100,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
if (IS_ENABLED(CONFIG_ZONE_DMA) &&
dev->coherent_dma_mask <= DMA_BIT_MASK(32))
flags |= GFP_DMA;
- if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
+ if (dev_get_cma_area(dev) && (flags & __GFP_WAIT)) {
struct page *page;
void *addr;
@@ -144,6 +144,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
struct page *page;
void *ptr, *coherent_ptr;
bool coherent = is_device_dma_coherent(dev);
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
size = PAGE_ALIGN(size);
@@ -171,9 +172,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
/* create a coherent mapping */
page = virt_to_page(ptr);
coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
- __get_dma_pgprot(attrs,
- __pgprot(PROT_NORMAL_NC), false),
- NULL);
+ prot, NULL);
if (!coherent_ptr)
goto no_map;
@@ -303,9 +302,10 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
sg->length, dir);
}
-/* vma->vm_page_prot must be set appropriately before calling this function */
-static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
+static int __swiotlb_mmap(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
{
int ret = -ENXIO;
unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
@@ -314,6 +314,9 @@ static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+ is_device_dma_coherent(dev));
+
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
@@ -327,20 +330,24 @@ static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
return ret;
}
-static int __swiotlb_mmap(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ struct dma_attrs *attrs)
{
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
- is_device_dma_coherent(dev));
- return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+ int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+
+ if (!ret)
+ sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
+ PAGE_ALIGN(size), 0);
+
+ return ret;
}
static struct dma_map_ops swiotlb_dma_ops = {
.alloc = __dma_alloc,
.free = __dma_free,
.mmap = __swiotlb_mmap,
+ .get_sgtable = __swiotlb_get_sgtable,
.map_page = __swiotlb_map_page,
.unmap_page = __swiotlb_unmap_page,
.map_sg = __swiotlb_map_sg_attrs,
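
[Editor's note] The new .get_sgtable callback builds a single-entry scatterlist covering the contiguous coherent buffer, which drivers reach through dma_get_sgtable(). A hedged kernel-context usage fragment (buf, handle and size are hypothetical and assumed to come from dma_alloc_coherent()):

struct sg_table sgt;
int ret;

/* Dispatches to __swiotlb_get_sgtable() above; on success, sgt.sgl is
 * a single entry describing the whole coherent buffer.
 */
ret = dma_get_sgtable(dev, &sgt, buf, handle, size);
if (!ret) {
	/* ... hand sgt to the importing subsystem ... */
	sg_free_table(&sgt);
}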
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 94d98cd1aad8..9fadf6d7039b 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -30,9 +30,11 @@
#include <linux/highmem.h>
#include <linux/perf_event.h>
+#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
+#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -224,6 +226,13 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
}
/*
+ * PAN bit set implies the fault happened in kernel space, but not
+ * in the arch's user access functions.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
+ goto no_context;
+
+ /*
* As per x86, we may deadlock here. However, since the kernel only
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
@@ -278,6 +287,7 @@ retry:
* starvation.
*/
mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ mm_flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
@@ -492,14 +502,22 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
}
-static struct fault_info debug_fault_info[] = {
+int __init early_brk64(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs);
+
+/*
+ * __refdata because early_brk64 is __init, but the reference to it is
+ * clobbered at arch_initcall time.
+ * See traps.c and debug-monitors.c:debug_traps_init().
+ */
+static struct fault_info __refdata debug_fault_info[] = {
{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" },
{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware single-step" },
{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" },
{ do_bad, SIGBUS, 0, "unknown 3" },
{ do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" },
{ do_bad, SIGTRAP, 0, "aarch32 vector catch" },
- { do_bad, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" },
+ { early_brk64, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" },
{ do_bad, SIGBUS, 0, "unknown 7" },
};
@@ -536,3 +554,10 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
return 0;
}
+
+#ifdef CONFIG_ARM64_PAN
+void cpu_enable_pan(void)
+{
+ config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+}
+#endif /* CONFIG_ARM64_PAN */
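
[Editor's note] With ARMv8.1 PAN enabled, a kernel access to user memory outside the uaccess helpers faults with PSTATE.PAN set, and the do_page_fault() hunk above sends such faults straight to no_context rather than treating them as recoverable. cpu_enable_pan() clears SCTLR_EL1.SPAN so that PSTATE.PAN is set automatically on exception entry. A small model of the config_sctlr_el1() read-modify-write it relies on (the bit position is an assumption; the real helper uses mrs/msr on the live register):

#include <stdio.h>

#define SCTLR_EL1_SPAN	(1UL << 23)	/* assumed bit position */

/* Model of config_sctlr_el1(clear, set): clear then set bits in the
 * register image, as cpu_enable_pan() does with SCTLR_EL1_SPAN.
 */
static unsigned long config_sctlr_model(unsigned long sctlr,
					unsigned long clear,
					unsigned long set)
{
	sctlr &= ~clear;
	sctlr |= set;
	return sctlr;
}

int main(void)
{
	unsigned long sctlr = SCTLR_EL1_SPAN;	/* assume SPAN initially set */

	sctlr = config_sctlr_model(sctlr, SCTLR_EL1_SPAN, 0);
	printf("SPAN now %d\n", (sctlr & SCTLR_EL1_SPAN) != 0);
	return 0;
}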
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 4dfa3975ce5b..c26b804015e8 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -60,14 +60,10 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *dst, const void *src,
unsigned long len)
{
-#ifdef CONFIG_SMP
preempt_disable();
-#endif
memcpy(dst, src, len);
flush_ptrace_access(vma, page, uaddr, dst, len);
-#ifdef CONFIG_SMP
preempt_enable();
-#endif
}
void __sync_icache_dcache(pte_t pte, unsigned long addr)
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 831ec534d449..383b03ff38f8 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index ad87ce826cce..f5c0680d17d9 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -358,9 +358,9 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
-static int keep_initrd;
+static int keep_initrd __initdata;
-void free_initrd_mem(unsigned long start, unsigned long end)
+void __init free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd)
free_reserved_area((void *)start, (void *)end, 0, "initrd");
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a4ede4e2ddd1..9211b8527f25 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -267,7 +267,7 @@ static void *late_alloc(unsigned long size)
return ptr;
}
-static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
+static void __init create_mapping(phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot)
{
if (virt < VMALLOC_START) {
@@ -461,17 +461,6 @@ void __init paging_init(void)
}
/*
- * Enable the identity mapping to allow the MMU disabling.
- */
-void setup_mm_for_reboot(void)
-{
- cpu_set_reserved_ttbr0();
- flush_tlb_all();
- cpu_set_idmap_tcr_t0sz();
- cpu_switch_mm(idmap_pg_dir, &init_mm);
-}
-
-/*
* Check whether a kernel address is valid (derived from arch/x86/).
*/
int kern_addr_valid(unsigned long addr)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 160a1b5ab9c6..7783ff05f74c 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -34,11 +34,7 @@
#define TCR_TG_FLAGS TCR_TG0_4K | TCR_TG1_4K
#endif
-#ifdef CONFIG_SMP
#define TCR_SMP_FLAGS TCR_SHARED
-#else
-#define TCR_SMP_FLAGS 0
-#endif
/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
@@ -150,13 +146,13 @@ ENDPROC(cpu_do_switch_mm)
* value of the SCTLR_EL1 register.
*/
ENTRY(__cpu_setup)
- ic iallu // I+BTB cache invalidate
tlbi vmalle1is // invalidate I + D TLBs
dsb ish
mov x0, #3 << 20
msr cpacr_el1, x0 // Enable FP/ASIMD
- msr mdscr_el1, xzr // Reset mdscr_el1
+ mov x0, #1 << 12 // Reset mdscr_el1 and disable
+ msr mdscr_el1, x0 // access to the DCC from EL0
/*
* Memory region attributes for LPAE:
*
@@ -198,6 +194,19 @@ ENTRY(__cpu_setup)
*/
mrs x9, ID_AA64MMFR0_EL1
bfi x10, x9, #32, #3
+#ifdef CONFIG_ARM64_HW_AFDBM
+ /*
+ * Hardware update of the Access and Dirty bits.
+ */
+ mrs x9, ID_AA64MMFR1_EL1
+ and x9, x9, #0xf
+ cbz x9, 2f
+ cmp x9, #2
+ b.lt 1f
+ orr x10, x10, #TCR_HD // hardware Dirty flag update
+1: orr x10, x10, #TCR_HA // hardware Access flag update
+2:
+#endif /* CONFIG_ARM64_HW_AFDBM */
msr tcr_el1, x10
ret // return to head.S
ENDPROC(__cpu_setup)
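
[Editor's note] The AFDBM hunk decodes the hardware Access/Dirty support field in ID_AA64MMFR1_EL1[3:0]: 0 means no hardware update, 1 means Access flag only, 2 means Access and Dirty. A standalone C rendering of the cbz/cmp/b.lt ladder (the TCR bit positions are assumptions for illustration; consult the ARM ARM for the authoritative values):

#include <stdint.h>
#include <stdio.h>

#define TCR_HA	(UINT64_C(1) << 39)	/* assumed: TCR_EL1.HA */
#define TCR_HD	(UINT64_C(1) << 40)	/* assumed: TCR_EL1.HD */

static uint64_t tcr_afdbm_flags(uint64_t id_aa64mmfr1)
{
	uint64_t hafdbs = id_aa64mmfr1 & 0xf;
	uint64_t tcr = 0;

	if (hafdbs == 0)		/* cbz x9, 2f */
		return tcr;
	if (hafdbs >= 2)		/* cmp x9, #2; b.lt 1f */
		tcr |= TCR_HD;		/* hardware Dirty flag update */
	tcr |= TCR_HA;			/* hardware Access flag update */
	return tcr;
}

int main(void)
{
	printf("HAFDBS=2 -> TCR flags %#jx\n",
	       (uintmax_t)tcr_afdbm_flags(2));
	return 0;
}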