Diffstat (limited to 'arch/powerpc')
 arch/powerpc/Kconfig                          |  3
 arch/powerpc/Makefile                         |  3
 arch/powerpc/boot/Makefile                    |  4
 arch/powerpc/boot/install.sh                  | 23  (mode 100644 -> 100755)
 arch/powerpc/configs/chrp32_defconfig         |  1
 arch/powerpc/configs/ppc6xx_defconfig         |  1
 arch/powerpc/include/asm/book3s/64/pgtable.h  | 31
 arch/powerpc/include/asm/bug.h                | 14
 arch/powerpc/include/asm/hugetlb.h            |  9
 arch/powerpc/include/asm/mman.h               | 12
 arch/powerpc/include/asm/svm.h                |  4
 arch/powerpc/include/asm/swiotlb.h            |  1
 arch/powerpc/include/asm/timex.h              |  1
 arch/powerpc/include/uapi/asm/stat.h          | 10
 arch/powerpc/kernel/crash_dump.c              | 35
 arch/powerpc/kernel/dma-swiotlb.c             |  1
 arch/powerpc/kernel/fadump.c                  |  2
 arch/powerpc/lib/checksum_wrappers.c          |  2
 arch/powerpc/mm/book3s64/pgtable.c            | 17
 arch/powerpc/mm/mem.c                         |  6
 arch/powerpc/platforms/powernv/opal-core.c    |  2
 arch/powerpc/platforms/pseries/setup.c        |  3
 arch/powerpc/platforms/pseries/svm.c          | 26
 23 files changed, 78 insertions(+), 133 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index f5ed355e75e6..3eaddb8997a9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -140,6 +140,7 @@ config PPC
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_HAS_VM_GET_PAGE_PROT if PPC_BOOK3S_64
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
@@ -158,6 +159,7 @@ config PPC
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select ARCH_WANT_LD_ORPHAN_WARN
+ select ARCH_WANTS_MODULES_DATA_IN_VMALLOC if PPC_BOOK3S_32 || PPC_8xx
select ARCH_WANTS_NO_INSTR
select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
@@ -569,7 +571,6 @@ config RELOCATABLE
bool "Build a relocatable kernel"
depends on PPC64 || (FLATMEM && (44x || FSL_BOOKE))
select NONSTATIC_KERNEL
- select MODULE_REL_CRCS if MODVERSIONS
help
This builds a kernel image that is capable of running at the
location the kernel is loaded at. For ppc32, there is no any
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 8bd3b631f094..a0cd70712061 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -408,8 +408,7 @@ endef
PHONY += install
install:
- sh -x $(srctree)/$(boot)/install.sh "$(KERNELRELEASE)" vmlinux \
- System.map "$(INSTALL_PATH)"
+ $(call cmd,install)
ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 8baa928381ff..a9cd2ea4a861 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -459,8 +459,8 @@ clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
clean-kernel-base := vmlinux.strip vmlinux.bin
clean-kernel := $(addsuffix .gz,$(clean-kernel-base))
clean-kernel += $(addsuffix .xz,$(clean-kernel-base))
-# If not absolute clean-files are relative to $(obj).
-clean-files += $(addprefix $(objtree)/, $(clean-kernel))
+# clean-files are relative to $(obj).
+clean-files += $(addprefix ../../../, $(clean-kernel))
WRAPPER_OBJDIR := /usr/lib/kernel-wrapper
WRAPPER_DTSDIR := /usr/lib/kernel-wrapper/dts
diff --git a/arch/powerpc/boot/install.sh b/arch/powerpc/boot/install.sh
index 14473150ddb4..461902c8a46d 100644..100755
--- a/arch/powerpc/boot/install.sh
+++ b/arch/powerpc/boot/install.sh
@@ -15,32 +15,9 @@
# $2 - kernel image file
# $3 - kernel map file
# $4 - default install path (blank if root directory)
-#
-# Bail with error code if anything goes wrong
set -e
-verify () {
- if [ ! -f "$1" ]; then
- echo "" 1>&2
- echo " *** Missing file: $1" 1>&2
- echo ' *** You need to run "make" before "make install".' 1>&2
- echo "" 1>&2
- exit 1
- fi
-}
-
-# Make sure the files actually exist
-verify "$2"
-verify "$3"
-
-# User may have a custom install script
-
-if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
-if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
-
-# Default install
-
# this should work for both the pSeries zImage and the iSeries vmlinux.sm
image_name=`basename $2`
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index a4a805b87469..fb314f75ad4b 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -53,7 +53,6 @@ CONFIG_ATA_GENERIC=y
CONFIG_NETDEVICES=y
CONFIG_PCNET32=y
CONFIG_NET_TULIP=y
-CONFIG_DE4X5=y
CONFIG_MV643XX_ETH=y
CONFIG_8139CP=y
CONFIG_8139TOO=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index bb549cb1c3e3..b622ecd73286 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -444,7 +444,6 @@ CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=m
CONFIG_TULIP_MMIO=y
-CONFIG_DE4X5=m
CONFIG_WINBOND_840=m
CONFIG_DM9102=m
CONFIG_ULI526X=m
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 010eb373fcb3..cb9d5fd39d7f 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -13,7 +13,6 @@
/*
* Common bits between hash and Radix page table
*/
-#define _PAGE_BIT_SWAP_TYPE 0
#define _PAGE_EXEC 0x00001 /* execute permission */
#define _PAGE_WRITE 0x00002 /* write access allowed */
@@ -754,17 +753,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
* Don't have overlapping bits with _PAGE_HPTEFLAGS \
* We filter HPTEFLAGS on set_pte. \
*/ \
- BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
+ BUILD_BUG_ON(_PAGE_HPTEFLAGS & SWP_TYPE_MASK); \
BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY); \
+ BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_EXCLUSIVE); \
} while (0)
#define SWP_TYPE_BITS 5
-#define __swp_type(x) (((x).val >> _PAGE_BIT_SWAP_TYPE) \
- & ((1UL << SWP_TYPE_BITS) - 1))
+#define SWP_TYPE_MASK ((1UL << SWP_TYPE_BITS) - 1)
+#define __swp_type(x) ((x).val & SWP_TYPE_MASK)
#define __swp_offset(x) (((x).val & PTE_RPN_MASK) >> PAGE_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { \
- ((type) << _PAGE_BIT_SWAP_TYPE) \
- | (((offset) << PAGE_SHIFT) & PTE_RPN_MASK)})
+ (type) | (((offset) << PAGE_SHIFT) & PTE_RPN_MASK)})
/*
* swp_entry_t must be independent of pte bits. We build a swp_entry_t from
* swap type and offset we get from swap and convert that to pte to find a
@@ -777,11 +776,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define __swp_entry_to_pmd(x) (pte_pmd(__swp_entry_to_pte(x)))
#ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SWP_SOFT_DIRTY (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
+#define _PAGE_SWP_SOFT_DIRTY _PAGE_SOFT_DIRTY
#else
#define _PAGE_SWP_SOFT_DIRTY 0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_NON_IDEMPOTENT
+
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
@@ -799,6 +800,22 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_EXCLUSIVE));
+}
+
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_EXCLUSIVE));
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SWP_EXCLUSIVE));
+}
+
static inline bool check_pte_access(unsigned long access, unsigned long ptev)
{
/*
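Side note on the encoding above: since the old _PAGE_BIT_SWAP_TYPE was 0, dropping the shift does not change the bit layout; the swap type simply lives in the low SWP_TYPE_BITS bits and the offset in the RPN field, with _PAGE_NON_IDEMPOTENT (not needed by swap PTEs) repurposed as _PAGE_SWP_EXCLUSIVE. A minimal userspace sketch of the round trip, using illustrative PAGE_SHIFT/PTE_RPN_MASK values rather than the kernel's:

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins; the kernel's values differ. */
#define SWP_TYPE_BITS	5
#define SWP_TYPE_MASK	((1UL << SWP_TYPE_BITS) - 1)
#define PAGE_SHIFT	16
#define PTE_RPN_MASK	(((1UL << 52) - 1) & ~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
	unsigned long type = 3, offset = 0x1234;
	/* __swp_entry(): type in bits 0-4, offset in the RPN field */
	unsigned long val = type | ((offset << PAGE_SHIFT) & PTE_RPN_MASK);

	assert((val & SWP_TYPE_MASK) == type);                  /* __swp_type()   */
	assert(((val & PTE_RPN_MASK) >> PAGE_SHIFT) == offset); /* __swp_offset() */
	printf("entry %#lx -> type %lu, offset %#lx\n", val, type, offset);
	return 0;
}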
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index ecbae1832de3..61a4736355c2 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -13,7 +13,8 @@
#ifdef CONFIG_DEBUG_BUGVERBOSE
.macro __EMIT_BUG_ENTRY addr,file,line,flags
.section __bug_table,"aw"
-5001: .4byte \addr - 5001b, 5002f - 5001b
+5001: .4byte \addr - .
+ .4byte 5002f - .
.short \line, \flags
.org 5001b+BUG_ENTRY_SIZE
.previous
@@ -24,7 +25,7 @@
#else
.macro __EMIT_BUG_ENTRY addr,file,line,flags
.section __bug_table,"aw"
-5001: .4byte \addr - 5001b
+5001: .4byte \addr - .
.short \flags
.org 5001b+BUG_ENTRY_SIZE
.previous
@@ -49,15 +50,16 @@
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define _EMIT_BUG_ENTRY \
".section __bug_table,\"aw\"\n" \
- "2:\t.4byte 1b - 2b, %0 - 2b\n" \
- "\t.short %1, %2\n" \
+ "2: .4byte 1b - .\n" \
+ " .4byte %0 - .\n" \
+ " .short %1, %2\n" \
".org 2b+%3\n" \
".previous\n"
#else
#define _EMIT_BUG_ENTRY \
".section __bug_table,\"aw\"\n" \
- "2:\t.4byte 1b - 2b\n" \
- "\t.short %2\n" \
+ "2: .4byte 1b - .\n" \
+ " .short %2\n" \
".org 2b+%3\n" \
".previous\n"
#endif
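The bug.h hunks switch every bug_table field from label-relative ("5002f - 5001b") to self-relative ("5002f - .") offsets, so each 32-bit word encodes "target - &field" and stays valid wherever the table is relocated. Decoding such a field is then just (a sketch, not code from this patch):

/* Recover a target address from a 32-bit self-relative field
 * that stores "target - &field". */
static inline unsigned long bug_relptr_decode(const int *field)
{
	return (unsigned long)field + *field;
}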
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index ef86197d1c0a..32ce0fb7548f 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -43,11 +43,14 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
}
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
{
- huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ pte_t pte;
+
+ pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
flush_hugetlb_page(vma, addr);
+ return pte;
}
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
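huge_ptep_clear_flush() now hands back the PTE it cleared instead of discarding it, so callers no longer need a racy re-read before the clear. A hedged caller sketch (the helpers are generic hugetlb ones; the surrounding code is illustrative, not from this patch):

/* Carry the old entry's dirty state into a replacement mapping.
 * Uses the 4-argument set_huge_pte_at() of this era. */
pte_t old_pte = huge_ptep_clear_flush(vma, haddr, ptep);
pte_t new_pte = mk_huge_pte(new_page, vma->vm_page_prot);

if (huge_pte_dirty(old_pte))
	new_pte = huge_pte_mkdirty(new_pte);
set_huge_pte_at(vma->vm_mm, haddr, ptep, new_pte);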
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 7cb6d18f5cd6..1b024e64c8ec 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -24,18 +24,6 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
-{
-#ifdef CONFIG_PPC_MEM_KEYS
- return (vm_flags & VM_SAO) ?
- __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :
- __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));
-#else
- return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
-#endif
-}
-#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
-
static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h
index da864623cc00..a02bd54b8948 100644
--- a/arch/powerpc/include/asm/svm.h
+++ b/arch/powerpc/include/asm/svm.h
@@ -17,8 +17,6 @@ static inline bool is_secure_guest(void)
return mfmsr() & MSR_S;
}
-void __init svm_swiotlb_init(void);
-
void dtl_cache_ctor(void *addr);
#define get_dtl_cache_ctor() (is_secure_guest() ? dtl_cache_ctor : NULL)
@@ -29,8 +27,6 @@ static inline bool is_secure_guest(void)
return false;
}
-static inline void svm_swiotlb_init(void) {}
-
#define get_dtl_cache_ctor() NULL
#endif /* CONFIG_PPC_SVM */
diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
index 3c1a1cd16128..4203b5e0a88e 100644
--- a/arch/powerpc/include/asm/swiotlb.h
+++ b/arch/powerpc/include/asm/swiotlb.h
@@ -9,6 +9,7 @@
#include <linux/swiotlb.h>
extern unsigned int ppc_swiotlb_enable;
+extern unsigned int ppc_swiotlb_flags;
#ifdef CONFIG_SWIOTLB
void swiotlb_detect_4g(void);
diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
index fa2e76e4093a..14b4489de52c 100644
--- a/arch/powerpc/include/asm/timex.h
+++ b/arch/powerpc/include/asm/timex.h
@@ -19,6 +19,7 @@ static inline cycles_t get_cycles(void)
{
return mftb();
}
+#define get_cycles get_cycles
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TIMEX_H */
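The self-referential #define is the usual kernel idiom for signalling that the architecture provides its own get_cycles(); the generic header only supplies a fallback when the macro is absent, roughly (a sketch of the asm-generic pattern, assuming cycles_t is already defined):

#ifndef get_cycles
/* Fallback used only when the arch did not define get_cycles. */
static inline cycles_t get_cycles(void)
{
	return 0;
}
#endif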
diff --git a/arch/powerpc/include/uapi/asm/stat.h b/arch/powerpc/include/uapi/asm/stat.h
index 7871055e5e32..a28c9a1201fa 100644
--- a/arch/powerpc/include/uapi/asm/stat.h
+++ b/arch/powerpc/include/uapi/asm/stat.h
@@ -29,16 +29,16 @@ struct __old_kernel_stat {
struct stat {
unsigned long st_dev;
- ino_t st_ino;
+ __kernel_ino_t st_ino;
#ifdef __powerpc64__
unsigned long st_nlink;
- mode_t st_mode;
+ __kernel_mode_t st_mode;
#else
- mode_t st_mode;
+ __kernel_mode_t st_mode;
unsigned short st_nlink;
#endif
- uid_t st_uid;
- gid_t st_gid;
+ __kernel_uid_t st_uid;
+ __kernel_gid_t st_gid;
unsigned long st_rdev;
long st_size;
unsigned long st_blksize;
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index c438c60fbc54..9a3b85bfc83f 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -16,7 +16,7 @@
#include <asm/code-patching.h>
#include <asm/kdump.h>
#include <asm/firmware.h>
-#include <linux/uaccess.h>
+#include <linux/uio.h>
#include <asm/rtas.h>
#include <asm/inst.h>
@@ -68,33 +68,8 @@ void __init setup_kdump_trampoline(void)
}
#endif /* CONFIG_NONSTATIC_KERNEL */
-static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
- unsigned long offset, int userbuf)
-{
- if (userbuf) {
- if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
- return -EFAULT;
- } else
- memcpy(buf, (vaddr + offset), csize);
-
- return csize;
-}
-
-/**
- * copy_oldmem_page - copy one page from "oldmem"
- * @pfn: page frame number to be copied
- * @buf: target memory address for the copy; this can be in kernel address
- * space or user address space (see @userbuf)
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page (based on pfn) to begin the copy
- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
- * otherwise @buf is in kernel address space, use memcpy().
- *
- * Copy a page from "oldmem". For this page, there is no pte mapped
- * in the current kernel. We stitch up a pte, similar to kmap_atomic.
- */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
{
void *vaddr;
phys_addr_t paddr;
@@ -107,10 +82,10 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (memblock_is_region_memory(paddr, csize)) {
vaddr = __va(paddr);
- csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
+ csize = copy_to_iter(vaddr + offset, csize, iter);
} else {
vaddr = ioremap_cache(paddr, PAGE_SIZE);
- csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
+ csize = copy_to_iter(vaddr + offset, csize, iter);
iounmap(vaddr);
}
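copy_oldmem_page() now writes through an iov_iter rather than a raw buffer plus userbuf flag, so the user-vs-kernel copy distinction moves into the iterator and the local copy_oldmem_vaddr() helper becomes redundant. A hedged sketch of a kernel-buffer caller (variable names illustrative; the real caller is the vmcore code):

struct kvec kv = { .iov_base = kbuf, .iov_len = len };
struct iov_iter iter;

/* READ marks the iter as the destination of copy_to_iter()
 * (later kernels spell this direction ITER_DEST). */
iov_iter_kvec(&iter, READ, &kv, 1, len);
copy_oldmem_page(&iter, pfn, len, offset);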
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index fc7816126a40..ba256c37bcc0 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -10,6 +10,7 @@
#include <asm/swiotlb.h>
unsigned int ppc_swiotlb_enable;
+unsigned int ppc_swiotlb_flags;
void __init swiotlb_detect_4g(void)
{
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index a171d0f6621d..ea0a073abd96 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -758,7 +758,7 @@ u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
* FIXME: How do i get PID? Do I really need it?
* prstatus.pr_pid = ????
*/
- elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
+ elf_core_copy_regs(&prstatus.pr_reg, regs);
buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
&prstatus, sizeof(prstatus));
return buf;
diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c
index f3999cbb2fcc..1a14c8780278 100644
--- a/arch/powerpc/lib/checksum_wrappers.c
+++ b/arch/powerpc/lib/checksum_wrappers.c
@@ -24,7 +24,6 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
user_read_access_end();
return csum;
}
-EXPORT_SYMBOL(csum_and_copy_from_user);
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
@@ -38,4 +37,3 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
user_write_access_end();
return csum;
}
-EXPORT_SYMBOL(csum_and_copy_to_user);
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 071bb66c3ad9..7b9966402b25 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -7,6 +7,7 @@
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
+#include <linux/pkeys.h>
#include <linux/debugfs.h>
#include <misc/cxl-base.h>
@@ -549,3 +550,19 @@ unsigned long memremap_compat_align(void)
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ unsigned long prot = pgprot_val(protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
+
+ if (vm_flags & VM_SAO)
+ prot |= _PAGE_SAO;
+
+#ifdef CONFIG_PPC_MEM_KEYS
+ prot |= vmflag_to_pte_pkey_bits(vm_flags);
+#endif
+
+ return __pgprot(prot);
+}
+EXPORT_SYMBOL(vm_get_page_prot);
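This arch-specific vm_get_page_prot() pairs with the new ARCH_HAS_VM_GET_PAGE_PROT select in the Kconfig hunk and replaces the arch_vm_get_page_prot() hook removed from mman.h: neither _PAGE_SAO nor pkey bits can be expressed through the protection_map table alone. For contrast, the generic fallback reduces to a plain lookup, roughly (a sketch of the mm-side behaviour, not from this patch):

static pgprot_t generic_vm_get_page_prot(unsigned long vm_flags)
{
	/* Table lookup only; no arch-specific PTE bits folded in. */
	return protection_map[vm_flags &
			      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}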
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 09b7b5e7bb9a..52b77684acda 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -17,6 +17,7 @@
#include <linux/suspend.h>
#include <linux/dma-direct.h>
+#include <asm/swiotlb.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
@@ -250,10 +251,7 @@ void __init mem_init(void)
* back to top-down.
*/
memblock_set_bottom_up(true);
- if (is_secure_guest())
- svm_swiotlb_init();
- else
- swiotlb_init(0);
+ swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
#endif
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
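swiotlb_init() now takes an explicit (enable, flags) pair, which is what lets this series drop both the global swiotlb_force and the hand-rolled svm_swiotlb_init(): SWIOTLB_ANY lifts the low-memory placement restriction (the only thing svm_swiotlb_init() existed to do) and SWIOTLB_FORCE bounces all DMA through the buffer. A hedged sketch of how a platform opts in under the new scheme (illustrative; the actual flag setting is in the svm.c hunk below):

if (is_secure_guest()) {
	/* Buffer may live anywhere; all DMA must bounce through it. */
	ppc_swiotlb_enable = 1;
	ppc_swiotlb_flags |= SWIOTLB_ANY | SWIOTLB_FORCE;
}
swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);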
diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c
index b97bc179f65a..adcb1a1a2bfe 100644
--- a/arch/powerpc/platforms/powernv/opal-core.c
+++ b/arch/powerpc/platforms/powernv/opal-core.c
@@ -112,7 +112,7 @@ static void __init fill_prstatus(struct elf_prstatus *prstatus, int pir,
struct pt_regs *regs)
{
memset(prstatus, 0, sizeof(struct elf_prstatus));
- elf_core_copy_kernel_regs(&(prstatus->pr_reg), regs);
+ elf_core_copy_regs(&(prstatus->pr_reg), regs);
/*
* Overload PID with PIR value.
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index c9fcc30a0365..afb074269b42 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -839,9 +839,6 @@ static void __init pSeries_setup_arch(void)
}
ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
-
- if (swiotlb_force == SWIOTLB_FORCE)
- ppc_swiotlb_enable = 1;
}
static void pseries_panic(char *str)
diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c
index c5228f4969eb..3b4045d508ec 100644
--- a/arch/powerpc/platforms/pseries/svm.c
+++ b/arch/powerpc/platforms/pseries/svm.c
@@ -28,7 +28,7 @@ static int __init init_svm(void)
* need to use the SWIOTLB buffer for DMA even if dma_capable() says
* otherwise.
*/
- swiotlb_force = SWIOTLB_FORCE;
+ ppc_swiotlb_flags |= SWIOTLB_ANY | SWIOTLB_FORCE;
/* Share the SWIOTLB buffer with the host. */
swiotlb_update_mem_attributes();
@@ -37,30 +37,6 @@ static int __init init_svm(void)
}
machine_early_initcall(pseries, init_svm);
-/*
- * Initialize SWIOTLB. Essentially the same as swiotlb_init(), except that it
- * can allocate the buffer anywhere in memory. Since the hypervisor doesn't have
- * any addressing limitation, we don't need to allocate it in low addresses.
- */
-void __init svm_swiotlb_init(void)
-{
- unsigned char *vstart;
- unsigned long bytes, io_tlb_nslabs;
-
- io_tlb_nslabs = (swiotlb_size_or_default() >> IO_TLB_SHIFT);
- io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-
- bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-
- vstart = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
- if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false))
- return;
-
-
- memblock_free(vstart, PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
- panic("SVM: Cannot allocate SWIOTLB buffer");
-}
-
int set_memory_encrypted(unsigned long addr, int numpages)
{
if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))