From 284e21fab2cfcf90dacce565e0b12f29e5df00c1 Mon Sep 17 00:00:00 2001
From: Thiago Jung Bauermann
Date: Tue, 6 Aug 2019 01:49:17 -0300
Subject: x86, s390/mm: Move sme_active() and sme_me_mask to x86-specific header

Now that generic code doesn't reference them, move sme_active() and
sme_me_mask to x86's <asm/mem_encrypt.h>.

Also remove the export for sme_active() since it's only used in files that
won't be built as modules.  sme_me_mask on the other hand is used in
arch/x86/kvm/svm.c (via __sme_set() and __psp_pa()) which can be built as
a module so its export needs to stay.

Signed-off-by: Thiago Jung Bauermann
Reviewed-by: Christoph Hellwig
Reviewed-by: Tom Lendacky
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20190806044919.10622-5-bauerman@linux.ibm.com
---
 include/linux/mem_encrypt.h | 14 +-------------
 1 file changed, 1 insertion(+), 13 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 470bd53a89df..0c5b0ff9eb29 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -18,23 +18,11 @@
 
 #else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-#define sme_me_mask	0ULL
-
-static inline bool sme_active(void) { return false; }
+static inline bool mem_encrypt_active(void) { return false; }
 static inline bool sev_active(void) { return false; }
 
 #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-static inline bool mem_encrypt_active(void)
-{
-	return sme_me_mask;
-}
-
-static inline u64 sme_get_me_mask(void)
-{
-	return sme_me_mask;
-}
-
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 /*
  * The __sme_set() and __sme_clr() macros are useful for adding or removing
--
cgit v1.2.3
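The hunk above cuts off at the comment introducing __sme_set() and __sme_clr().
Those macros, defined a few lines further down in the same header, are why the
sme_me_mask export has to stay while the sme_active() export can go: they expand
to a direct reference to the symbol, so a module such as arch/x86/kvm/svm.c that
uses __sme_set() needs sme_me_mask exported at module link time. A minimal
sketch, not part of the patch above, assuming the usual upstream macro
definitions and a hypothetical caller:

    #include <linux/types.h>

    extern u64 sme_me_mask;			/* still exported by arch/x86/mm/mem_encrypt.c */

    /* Approximate definitions from the CONFIG_AMD_MEM_ENCRYPT part of this header. */
    #define __sme_set(x)	((x) | sme_me_mask)
    #define __sme_clr(x)	((x) & ~sme_me_mask)

    /* Hypothetical module code, standing in for the svm.c usage: */
    static u64 example_encrypted_pa(u64 pa)
    {
    	return __sme_set(pa);	/* expands to a load of the exported sme_me_mask */
    }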
From ae7eb82a92fae4d255a0caa9f7c0f99e3babfec1 Mon Sep 17 00:00:00 2001
From: Thiago Jung Bauermann
Date: Tue, 6 Aug 2019 01:49:18 -0300
Subject: fs/core/vmcore: Move sev_active() reference to x86 arch code

Secure Encrypted Virtualization is an x86-specific feature, so it shouldn't
appear in generic kernel code because it forces non-x86 architectures to
define the sev_active() function, which doesn't make a lot of sense.

To solve this problem, add an x86 elfcorehdr_read() function to override
the generic weak implementation. To do that, it's necessary to make
read_from_oldmem() public so that it can be used outside of vmcore.c.

Also, remove the export for sev_active() since it's only used in files
that won't be built as modules.

Signed-off-by: Thiago Jung Bauermann
Reviewed-by: Christoph Hellwig
Reviewed-by: Lianbo Jiang
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20190806044919.10622-6-bauerman@linux.ibm.com
---
 arch/x86/kernel/crash_dump_64.c |  5 +++++
 arch/x86/mm/mem_encrypt.c       |  1 -
 fs/proc/vmcore.c                |  8 ++++----
 include/linux/crash_dump.h      | 14 ++++++++++++++
 include/linux/mem_encrypt.h     |  1 -
 5 files changed, 23 insertions(+), 6 deletions(-)

(limited to 'include/linux')

diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index 22369dd5de3b..045e82e8945b 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -70,3 +70,8 @@ ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
 {
 	return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
 }
+
+ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+{
+	return read_from_oldmem(buf, count, ppos, 0, sev_active());
+}
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 94da5a88abe6..9268c12458c8 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -349,7 +349,6 @@ bool sev_active(void)
 {
 	return sme_me_mask && sev_enabled;
 }
-EXPORT_SYMBOL(sev_active);
 
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 7bcc92add72c..7b13988796e1 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -104,9 +104,9 @@ static int pfn_is_ram(unsigned long pfn)
 }
 
 /* Reads a page from the oldmem device from given offset. */
-static ssize_t read_from_oldmem(char *buf, size_t count,
-				u64 *ppos, int userbuf,
-				bool encrypted)
+ssize_t read_from_oldmem(char *buf, size_t count,
+			 u64 *ppos, int userbuf,
+			 bool encrypted)
 {
 	unsigned long pfn, offset;
 	size_t nr_bytes;
@@ -170,7 +170,7 @@ void __weak elfcorehdr_free(unsigned long long addr)
  */
 ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
-	return read_from_oldmem(buf, count, ppos, 0, sev_active());
+	return read_from_oldmem(buf, count, ppos, 0, false);
 }
 
 /*
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index f774c5eb9e3c..4664fc1871de 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -115,4 +115,18 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
 	return -EOPNOTSUPP;
 }
 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
+
+#ifdef CONFIG_PROC_VMCORE
+ssize_t read_from_oldmem(char *buf, size_t count,
+			 u64 *ppos, int userbuf,
+			 bool encrypted);
+#else
+static inline ssize_t read_from_oldmem(char *buf, size_t count,
+				       u64 *ppos, int userbuf,
+				       bool encrypted)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PROC_VMCORE */
+
 #endif /* LINUX_CRASHDUMP_H */
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 0c5b0ff9eb29..5c4a18a91f89 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -19,7 +19,6 @@
 #else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
 static inline bool mem_encrypt_active(void) { return false; }
-static inline bool sev_active(void) { return false; }
 
 #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
--
cgit v1.2.3
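With read_from_oldmem() now declared in <linux/crash_dump.h>, any architecture
can override the __weak elfcorehdr_read() in fs/proc/vmcore.c the same way the
x86 patch above does, choosing for itself whether the old kernel's memory needs
the encrypted copy path. A minimal sketch, with a hypothetical file and flag
choice (the only in-tree override added by this patch is the x86 one shown
above):

    /* Hypothetical arch/<arch>/kernel/crash_dump.c */
    #include <linux/crash_dump.h>

    ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
    {
    	/*
    	 * Overrides the __weak version in fs/proc/vmcore.c; pass true as
    	 * the "encrypted" flag when the old kernel's memory must be read
    	 * through copy_oldmem_page_encrypted().
    	 */
    	return read_from_oldmem(buf, count, ppos, 0, false);
    }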