author     Alexander Gordeev <agordeev@linux.ibm.com>   2022-08-06 10:24:07 +0300
committer  Alexander Gordeev <agordeev@linux.ibm.com>   2022-08-06 10:24:07 +0300
commit     5e441f61f509617a3f57fcb156b7aa2870cc8752 (patch)
tree       267f0773e33ab237296a566040bc0993ad4ebb0c /arch/s390/mm
parent     3fb39cb7c5145a10e1a0221c057e92fb8855efbb (diff)
download   linux-5e441f61f509617a3f57fcb156b7aa2870cc8752.tar.xz
Revert "s390/smp: rework absolute lowcore access"
This reverts commit 7d06fed77b7d8fc9f6cc41b4e3f2823d32532ad8.
The reverted commit introduced vmem_mutex locking in vmem_map_4k_page(),
which is called from smp_reinit_ipl_cpu() with interrupts disabled.
Although smp_reinit_ipl_cpu() is a pre-SMP early initcall - so no other
CPUs run in parallel and no other code takes vmem_mutex at this stage of
the boot - acquiring a sleeping lock with interrupts disabled is still
invalid and needs to be fixed (see the illustrative sketch after the
sign-off).
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
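The locking problem referenced in the message can be shown with a short,
self-contained sketch. This is not part of the commit: the names
example_mutex, example_map_page() and example_reinit_cpu() are hypothetical
stand-ins for vmem_mutex, vmem_map_4k_page() and smp_reinit_ipl_cpu(). A
mutex is a sleeping lock, so taking it on a path that runs with interrupts
disabled is invalid even when nothing can actually contend for it.

/*
 * Illustrative sketch only - not taken from the kernel tree.
 * example_* names are hypothetical stand-ins (see lead-in above).
 */
#include <linux/mutex.h>
#include <linux/irqflags.h>

static DEFINE_MUTEX(example_mutex);             /* stands in for vmem_mutex */

static void example_map_page(void)              /* stands in for vmem_map_4k_page() */
{
        mutex_lock(&example_mutex);             /* sleeping lock */
        /* ... install a 4KB mapping ... */
        mutex_unlock(&example_mutex);
}

static void example_reinit_cpu(void)            /* stands in for smp_reinit_ipl_cpu() */
{
        unsigned long flags;

        local_irq_save(flags);                  /* interrupts disabled ... */
        example_map_page();                     /* ... yet mutex_lock() may sleep: bug */
        local_irq_restore(flags);
}

With CONFIG_DEBUG_ATOMIC_SLEEP enabled, the might_sleep() check inside
mutex_lock() warns on such a call even if the mutex is never contended.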
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--   arch/s390/mm/init.c    |  2
-rw-r--r--   arch/s390/mm/maccess.c | 67
-rw-r--r--   arch/s390/mm/vmem.c    | 85
3 files changed, 38 insertions, 116 deletions
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 7b6873ac99d1..6a0ac00d5a42 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -38,7 +38,7 @@
 #include <asm/kfence.h>
 #include <asm/ptdump.h>
 #include <asm/dma.h>
-#include <asm/abs_lowcore.h>
+#include <asm/lowcore.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index b8451ddbb3d6..d6d84e02f35a 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -15,7 +15,6 @@
 #include <asm/asm-extable.h>
 #include <asm/ctl_reg.h>
 #include <asm/io.h>
-#include <asm/abs_lowcore.h>
 #include <asm/stacktrace.h>
 
 static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
@@ -149,20 +148,46 @@ int memcpy_real(void *dest, unsigned long src, size_t count)
 }
 
 /*
- * Find CPU that owns swapped prefix page
+ * Copy memory in absolute mode (kernel to kernel)
  */
-static int get_swapped_owner(phys_addr_t addr)
+void memcpy_absolute(void *dest, void *src, size_t count)
+{
+        unsigned long cr0, flags, prefix;
+
+        flags = arch_local_irq_save();
+        __ctl_store(cr0, 0, 0);
+        __ctl_clear_bit(0, 28); /* disable lowcore protection */
+        prefix = store_prefix();
+        if (prefix) {
+                local_mcck_disable();
+                set_prefix(0);
+                memcpy(dest, src, count);
+                set_prefix(prefix);
+                local_mcck_enable();
+        } else {
+                memcpy(dest, src, count);
+        }
+        __ctl_load(cr0, 0, 0);
+        arch_local_irq_restore(flags);
+}
+
+/*
+ * Check if physical address is within prefix or zero page
+ */
+static int is_swapped(phys_addr_t addr)
 {
         phys_addr_t lc;
         int cpu;
 
+        if (addr < sizeof(struct lowcore))
+                return 1;
         for_each_online_cpu(cpu) {
                 lc = virt_to_phys(lowcore_ptr[cpu]);
                 if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
                         continue;
-                return cpu;
+                return 1;
         }
-        return -1;
+        return 0;
 }
 
 /*
@@ -175,35 +200,17 @@ void *xlate_dev_mem_ptr(phys_addr_t addr)
 {
         void *ptr = phys_to_virt(addr);
         void *bounce = ptr;
-        struct lowcore *abs_lc;
-        unsigned long flags;
         unsigned long size;
-        int this_cpu, cpu;
 
         cpus_read_lock();
-        this_cpu = get_cpu();
-        if (addr >= sizeof(struct lowcore)) {
-                cpu = get_swapped_owner(addr);
-                if (cpu < 0)
-                        goto out;
-        }
-        bounce = (void *)__get_free_page(GFP_ATOMIC);
-        if (!bounce)
-                goto out;
-        size = PAGE_SIZE - (addr & ~PAGE_MASK);
-        if (addr < sizeof(struct lowcore)) {
-                abs_lc = get_abs_lowcore(&flags);
-                ptr = (void *)abs_lc + addr;
-                memcpy(bounce, ptr, size);
-                put_abs_lowcore(abs_lc, flags);
-        } else if (cpu == this_cpu) {
-                ptr = (void *)(addr - virt_to_phys(lowcore_ptr[cpu]));
-                memcpy(bounce, ptr, size);
-        } else {
-                memcpy(bounce, ptr, size);
+        preempt_disable();
+        if (is_swapped(addr)) {
+                size = PAGE_SIZE - (addr & ~PAGE_MASK);
+                bounce = (void *) __get_free_page(GFP_ATOMIC);
+                if (bounce)
+                        memcpy_absolute(bounce, ptr, size);
         }
-out:
-        put_cpu();
+        preempt_enable();
         cpus_read_unlock();
         return bounce;
 }
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 203ba2bfea59..c2583f921ca8 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -561,91 +561,6 @@ int vmem_add_mapping(unsigned long start, unsigned long size)
 }
 
 /*
- * Allocate new or return existing page-table entry, but do not map it
- * to any physical address. If missing, allocate segment- and region-
- * table entries along. Meeting a large segment- or region-table entry
- * while traversing is an error, since the function is expected to be
- * called against virtual regions reserverd for 4KB mappings only.
- */
-static pte_t *vmem_get_alloc_pte(unsigned long addr)
-{
-        pte_t *ptep = NULL;
-        pgd_t *pgd;
-        p4d_t *p4d;
-        pud_t *pud;
-        pmd_t *pmd;
-        pte_t *pte;
-
-        pgd = pgd_offset_k(addr);
-        if (pgd_none(*pgd)) {
-                p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
-                if (!p4d)
-                        goto out;
-                pgd_populate(&init_mm, pgd, p4d);
-        }
-        p4d = p4d_offset(pgd, addr);
-        if (p4d_none(*p4d)) {
-                pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
-                if (!pud)
-                        goto out;
-                p4d_populate(&init_mm, p4d, pud);
-        }
-        pud = pud_offset(p4d, addr);
-        if (pud_none(*pud)) {
-                pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
-                if (!pmd)
-                        goto out;
-                pud_populate(&init_mm, pud, pmd);
-        } else if (WARN_ON_ONCE(pud_large(*pud))) {
-                goto out;
-        }
-        pmd = pmd_offset(pud, addr);
-        if (pmd_none(*pmd)) {
-                pte = vmem_pte_alloc();
-                if (!pte)
-                        goto out;
-                pmd_populate(&init_mm, pmd, pte);
-        } else if (WARN_ON_ONCE(pmd_large(*pmd))) {
-                goto out;
-        }
-        ptep = pte_offset_kernel(pmd, addr);
-out:
-        return ptep;
-}
-
-int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
-{
-        pte_t *ptep, pte;
-        int rc = 0;
-
-        if (!IS_ALIGNED(addr, PAGE_SIZE))
-                return -EINVAL;
-        mutex_lock(&vmem_mutex);
-        ptep = vmem_get_alloc_pte(addr);
-        if (!ptep) {
-                rc = -ENOMEM;
-                goto out;
-        }
-        __ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
-        pte = mk_pte_phys(phys, prot);
-        set_pte(ptep, pte);
-out:
-        mutex_unlock(&vmem_mutex);
-        return rc;
-}
-
-void vmem_unmap_4k_page(unsigned long addr)
-{
-        pte_t *ptep;
-
-        mutex_lock(&vmem_mutex);
-        ptep = virt_to_kpte(addr);
-        __ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
-        pte_clear(&init_mm, addr, ptep);
-        mutex_unlock(&vmem_mutex);
-}
-
-/*
  * map whole physical memory to virtual memory (identity mapping)
  * we reserve enough space in the vmalloc area for vmemmap to hotplug
  * additional memory segments.