Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/acrn.h                  |  78
-rw-r--r--  arch/x86/include/asm/apb_timer.h             |  40
-rw-r--r--  arch/x86/include/asm/crypto/camellia.h       |  91
-rw-r--r--  arch/x86/include/asm/crypto/glue_helper.h    | 118
-rw-r--r--  arch/x86/include/asm/crypto/serpent-avx.h    |  42
-rw-r--r--  arch/x86/include/asm/crypto/serpent-sse2.h   |  60
-rw-r--r--  arch/x86/include/asm/crypto/twofish.h        |  25
-rw-r--r--  arch/x86/include/asm/fixmap.h                |   3
-rw-r--r--  arch/x86/include/asm/idtentry.h              |  11
-rw-r--r--  arch/x86/include/asm/insn.h                  |  45
-rw-r--r--  arch/x86/include/asm/intel-mid.h             |  93
-rw-r--r--  arch/x86/include/asm/intel_mid_vrtc.h        |  10
-rw-r--r--  arch/x86/include/asm/intel_scu_ipc.h         |   2
-rw-r--r--  arch/x86/include/asm/intel_scu_ipc_legacy.h  |  91
-rw-r--r--  arch/x86/include/asm/irq.h                   |   2
-rw-r--r--  arch/x86/include/asm/irq_stack.h             | 279
-rw-r--r--  arch/x86/include/asm/kfence.h                |  64
-rw-r--r--  arch/x86/include/asm/kvm-x86-ops.h           |   6
-rw-r--r--  arch/x86/include/asm/kvm_host.h              |  36
-rw-r--r--  arch/x86/include/asm/orc_types.h             |  10
-rw-r--r--  arch/x86/include/asm/platform_sst_audio.h    |   2
-rw-r--r--  arch/x86/include/asm/processor.h             |   9
-rw-r--r--  arch/x86/include/asm/softirq_stack.h         |  11
-rw-r--r--  arch/x86/include/asm/unwind_hints.h          |  13
24 files changed, 426 insertions(+), 715 deletions(-)
diff --git a/arch/x86/include/asm/acrn.h b/arch/x86/include/asm/acrn.h
new file mode 100644
index 000000000000..e003a01b7c67
--- /dev/null
+++ b/arch/x86/include/asm/acrn.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_ACRN_H
+#define _ASM_X86_ACRN_H
+
+/*
+ * This CPUID leaf returns feature bitmaps in EAX.
+ * The guest VM uses this to detect whether a given feature bit is set.
+ */
+#define ACRN_CPUID_FEATURES 0x40000001
+/* Bit 0 indicates whether the guest VM is privileged */
+#define ACRN_FEATURE_PRIVILEGED_VM BIT(0)
+
+void acrn_setup_intr_handler(void (*handler)(void));
+void acrn_remove_intr_handler(void);
+
+static inline u32 acrn_cpuid_base(void)
+{
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return hypervisor_cpuid_base("ACRNACRNACRN", 0);
+
+ return 0;
+}
+
+/*
+ * Hypercalls for ACRN
+ *
+ * - VMCALL instruction is used to implement ACRN hypercalls.
+ * - ACRN hypercall ABI:
+ * - Hypercall number is passed in R8 register.
+ * - Up to 2 arguments are passed in RDI, RSI.
+ * - Return value will be placed in RAX.
+ *
+ * Because GCC doesn't support the R8 register as a direct register
+ * constraint, use a supported constraint as input with an explicit MOV to
+ * R8 at the beginning of the asm.
+ */
+static inline long acrn_hypercall0(unsigned long hcall_id)
+{
+ long result;
+
+ asm volatile("movl %1, %%r8d\n\t"
+ "vmcall\n\t"
+ : "=a" (result)
+ : "g" (hcall_id)
+ : "r8", "memory");
+
+ return result;
+}
+
+static inline long acrn_hypercall1(unsigned long hcall_id,
+ unsigned long param1)
+{
+ long result;
+
+ asm volatile("movl %1, %%r8d\n\t"
+ "vmcall\n\t"
+ : "=a" (result)
+ : "g" (hcall_id), "D" (param1)
+ : "r8", "memory");
+
+ return result;
+}
+
+static inline long acrn_hypercall2(unsigned long hcall_id,
+ unsigned long param1,
+ unsigned long param2)
+{
+ long result;
+
+ asm volatile("movl %1, %%r8d\n\t"
+ "vmcall\n\t"
+ : "=a" (result)
+ : "g" (hcall_id), "D" (param1), "S" (param2)
+ : "r8", "memory");
+
+ return result;
+}
+
+#endif /* _ASM_X86_ACRN_H */
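For illustration, a minimal caller sketch built on the wrappers above. The
hypercall ID and the error handling are assumptions made for this example;
real IDs live in the ACRN HSM driver, not in this header.

/* Hypothetical caller (illustrative only; HC_EXAMPLE_ID is made up) */
#define HC_EXAMPLE_ID	0x80UL

static int example_request(unsigned long gpa_of_buf)
{
	/*
	 * hcall_id is moved into R8 by the wrapper, gpa_of_buf goes in
	 * RDI, and the hypervisor's return value comes back in RAX.
	 */
	long ret = acrn_hypercall1(HC_EXAMPLE_ID, gpa_of_buf);

	return ret < 0 ? (int)ret : 0;
}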
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
deleted file mode 100644
index 87ce8e963215..000000000000
--- a/arch/x86/include/asm/apb_timer.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * apb_timer.h: Driver for Langwell APB timer based on Synopsis DesignWare
- *
- * (C) Copyright 2009 Intel Corporation
- * Author: Jacob Pan (jacob.jun.pan@intel.com)
- *
- * Note:
- */
-
-#ifndef ASM_X86_APBT_H
-#define ASM_X86_APBT_H
-#include <linux/sfi.h>
-
-#ifdef CONFIG_APB_TIMER
-
-/* default memory mapped register base */
-#define LNW_SCU_ADDR 0xFF100000
-#define LNW_EXT_TIMER_OFFSET 0x1B800
-#define APBT_DEFAULT_BASE (LNW_SCU_ADDR+LNW_EXT_TIMER_OFFSET)
-#define LNW_EXT_TIMER_PGOFFSET 0x800
-
-/* APBT clock speed range from PCLK to fabric base, 25-100MHz */
-#define APBT_MAX_FREQ 50000000
-#define APBT_MIN_FREQ 1000000
-#define APBT_MMAP_SIZE 1024
-
-extern void apbt_time_init(void);
-extern void apbt_setup_secondary_clock(void);
-
-extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
-extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
-extern int sfi_mtimer_num;
-
-#else /* CONFIG_APB_TIMER */
-
-static inline void apbt_time_init(void) { }
-
-#endif
-#endif /* ASM_X86_APBT_H */
diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h
deleted file mode 100644
index f6d91861cb14..000000000000
--- a/arch/x86/include/asm/crypto/camellia.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_X86_CAMELLIA_H
-#define ASM_X86_CAMELLIA_H
-
-#include <crypto/b128ops.h>
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-
-#define CAMELLIA_MIN_KEY_SIZE 16
-#define CAMELLIA_MAX_KEY_SIZE 32
-#define CAMELLIA_BLOCK_SIZE 16
-#define CAMELLIA_TABLE_BYTE_LEN 272
-#define CAMELLIA_PARALLEL_BLOCKS 2
-
-struct crypto_skcipher;
-
-struct camellia_ctx {
- u64 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
- u32 key_length;
-};
-
-struct camellia_xts_ctx {
- struct camellia_ctx tweak_ctx;
- struct camellia_ctx crypt_ctx;
-};
-
-extern int __camellia_setkey(struct camellia_ctx *cctx,
- const unsigned char *key,
- unsigned int key_len);
-
-extern int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen);
-
-/* regular block cipher functions */
-asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src,
- bool xor);
-asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src);
-
-/* 2-way parallel cipher functions */
-asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src,
- bool xor);
-asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src);
-
-/* 16-way parallel cipher functions (avx/aes-ni) */
-asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
-asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
-
-asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
-asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-static inline void camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src)
-{
- __camellia_enc_blk(ctx, dst, src, false);
-}
-
-static inline void camellia_enc_blk_xor(const void *ctx, u8 *dst, const u8 *src)
-{
- __camellia_enc_blk(ctx, dst, src, true);
-}
-
-static inline void camellia_enc_blk_2way(const void *ctx, u8 *dst,
- const u8 *src)
-{
- __camellia_enc_blk_2way(ctx, dst, src, false);
-}
-
-static inline void camellia_enc_blk_xor_2way(const void *ctx, u8 *dst,
- const u8 *src)
-{
- __camellia_enc_blk_2way(ctx, dst, src, true);
-}
-
-/* glue helpers */
-extern void camellia_decrypt_cbc_2way(const void *ctx, u8 *dst, const u8 *src);
-extern void camellia_crypt_ctr(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-extern void camellia_crypt_ctr_2way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-extern void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-extern void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-#endif /* ASM_X86_CAMELLIA_H */
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
deleted file mode 100644
index 777c0f63418c..000000000000
--- a/arch/x86/include/asm/crypto/glue_helper.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Shared glue code for 128bit block ciphers
- */
-
-#ifndef _CRYPTO_GLUE_HELPER_H
-#define _CRYPTO_GLUE_HELPER_H
-
-#include <crypto/internal/skcipher.h>
-#include <linux/kernel.h>
-#include <asm/fpu/api.h>
-#include <crypto/b128ops.h>
-
-typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src);
-typedef void (*common_glue_cbc_func_t)(const void *ctx, u8 *dst, const u8 *src);
-typedef void (*common_glue_ctr_func_t)(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-typedef void (*common_glue_xts_func_t)(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-struct common_glue_func_entry {
- unsigned int num_blocks; /* number of blocks that @fn will process */
- union {
- common_glue_func_t ecb;
- common_glue_cbc_func_t cbc;
- common_glue_ctr_func_t ctr;
- common_glue_xts_func_t xts;
- } fn_u;
-};
-
-struct common_glue_ctx {
- unsigned int num_funcs;
- int fpu_blocks_limit; /* -1 means fpu not needed at all */
-
- /*
- * First funcs entry must have largest num_blocks and last funcs entry
- * must have num_blocks == 1!
- */
- struct common_glue_func_entry funcs[];
-};
-
-static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
- struct skcipher_walk *walk,
- bool fpu_enabled, unsigned int nbytes)
-{
- if (likely(fpu_blocks_limit < 0))
- return false;
-
- if (fpu_enabled)
- return true;
-
- /*
- * Vector-registers are only used when chunk to be processed is large
- * enough, so do not enable FPU until it is necessary.
- */
- if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
- return false;
-
- /* prevent sleeping if FPU is in use */
- skcipher_walk_atomise(walk);
-
- kernel_fpu_begin();
- return true;
-}
-
-static inline void glue_fpu_end(bool fpu_enabled)
-{
- if (fpu_enabled)
- kernel_fpu_end();
-}
-
-static inline void le128_to_be128(be128 *dst, const le128 *src)
-{
- dst->a = cpu_to_be64(le64_to_cpu(src->a));
- dst->b = cpu_to_be64(le64_to_cpu(src->b));
-}
-
-static inline void be128_to_le128(le128 *dst, const be128 *src)
-{
- dst->a = cpu_to_le64(be64_to_cpu(src->a));
- dst->b = cpu_to_le64(be64_to_cpu(src->b));
-}
-
-static inline void le128_inc(le128 *i)
-{
- u64 a = le64_to_cpu(i->a);
- u64 b = le64_to_cpu(i->b);
-
- b++;
- if (!b)
- a++;
-
- i->a = cpu_to_le64(a);
- i->b = cpu_to_le64(b);
-}
-
-extern int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
- struct skcipher_request *req);
-
-extern int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
- struct skcipher_request *req);
-
-extern int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
- struct skcipher_request *req);
-
-extern int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
- struct skcipher_request *req);
-
-extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
- struct skcipher_request *req,
- common_glue_func_t tweak_fn, void *tweak_ctx,
- void *crypt_ctx, bool decrypt);
-
-extern void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst,
- const u8 *src, le128 *iv,
- common_glue_func_t fn);
-
-#endif /* _CRYPTO_GLUE_HELPER_H */
diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h
deleted file mode 100644
index 251c2c89d7cf..000000000000
--- a/arch/x86/include/asm/crypto/serpent-avx.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_X86_SERPENT_AVX_H
-#define ASM_X86_SERPENT_AVX_H
-
-#include <crypto/b128ops.h>
-#include <crypto/serpent.h>
-#include <linux/types.h>
-
-struct crypto_skcipher;
-
-#define SERPENT_PARALLEL_BLOCKS 8
-
-struct serpent_xts_ctx {
- struct serpent_ctx tweak_ctx;
- struct serpent_ctx crypt_ctx;
-};
-
-asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst,
- const u8 *src);
-
-asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-
-extern void __serpent_crypt_ctr(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-extern void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv);
-extern void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv);
-
-extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen);
-
-#endif
diff --git a/arch/x86/include/asm/crypto/serpent-sse2.h b/arch/x86/include/asm/crypto/serpent-sse2.h
deleted file mode 100644
index 860ca248914b..000000000000
--- a/arch/x86/include/asm/crypto/serpent-sse2.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_X86_SERPENT_SSE2_H
-#define ASM_X86_SERPENT_SSE2_H
-
-#include <linux/crypto.h>
-#include <crypto/serpent.h>
-
-#ifdef CONFIG_X86_32
-
-#define SERPENT_PARALLEL_BLOCKS 4
-
-asmlinkage void __serpent_enc_blk_4way(const struct serpent_ctx *ctx, u8 *dst,
- const u8 *src, bool xor);
-asmlinkage void serpent_dec_blk_4way(const struct serpent_ctx *ctx, u8 *dst,
- const u8 *src);
-
-static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src)
-{
- __serpent_enc_blk_4way(ctx, dst, src, false);
-}
-
-static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx,
- u8 *dst, const u8 *src)
-{
- __serpent_enc_blk_4way(ctx, dst, src, true);
-}
-
-static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src)
-{
- serpent_dec_blk_4way(ctx, dst, src);
-}
-
-#else
-
-#define SERPENT_PARALLEL_BLOCKS 8
-
-asmlinkage void __serpent_enc_blk_8way(const struct serpent_ctx *ctx, u8 *dst,
- const u8 *src, bool xor);
-asmlinkage void serpent_dec_blk_8way(const struct serpent_ctx *ctx, u8 *dst,
- const u8 *src);
-
-static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src)
-{
- __serpent_enc_blk_8way(ctx, dst, src, false);
-}
-
-static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx,
- u8 *dst, const u8 *src)
-{
- __serpent_enc_blk_8way(ctx, dst, src, true);
-}
-
-static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src)
-{
- serpent_dec_blk_8way(ctx, dst, src);
-}
-
-#endif
-
-#endif
diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h
deleted file mode 100644
index 2c377a8042e1..000000000000
--- a/arch/x86/include/asm/crypto/twofish.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_X86_TWOFISH_H
-#define ASM_X86_TWOFISH_H
-
-#include <linux/crypto.h>
-#include <crypto/twofish.h>
-#include <crypto/b128ops.h>
-
-/* regular block cipher functions from twofish_x86_64 module */
-asmlinkage void twofish_enc_blk(const void *ctx, u8 *dst, const u8 *src);
-asmlinkage void twofish_dec_blk(const void *ctx, u8 *dst, const u8 *src);
-
-/* 3-way parallel cipher functions */
-asmlinkage void __twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src,
- bool xor);
-asmlinkage void twofish_dec_blk_3way(const void *ctx, u8 *dst, const u8 *src);
-
-/* helpers from twofish_x86_64-3way module */
-extern void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src);
-extern void twofish_enc_blk_ctr(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-extern void twofish_enc_blk_ctr_3way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-#endif /* ASM_X86_TWOFISH_H */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 9f1a0a987e5e..d0dcefb5cc59 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -108,9 +108,6 @@ enum fixed_addresses {
#ifdef CONFIG_PARAVIRT_XXL
FIX_PARAVIRT_BOOTMAP,
#endif
-#ifdef CONFIG_X86_INTEL_MID
- FIX_LNW_VRTC,
-#endif
#ifdef CONFIG_ACPI_APEI_GHES
/* Used for GHES mapping from assorted contexts */
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index 41e2e2e1b439..5eb3bdf36a41 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -187,23 +187,22 @@ __visible noinstr void func(struct pt_regs *regs, unsigned long error_code)
* has to be done in the function body if necessary.
*/
#define DEFINE_IDTENTRY_IRQ(func) \
-static __always_inline void __##func(struct pt_regs *regs, u8 vector); \
+static void __##func(struct pt_regs *regs, u32 vector); \
\
__visible noinstr void func(struct pt_regs *regs, \
unsigned long error_code) \
{ \
irqentry_state_t state = irqentry_enter(regs); \
+ u32 vector = (u32)(u8)error_code; \
\
instrumentation_begin(); \
- irq_enter_rcu(); \
kvm_set_cpu_l1tf_flush_l1d(); \
- __##func (regs, (u8)error_code); \
- irq_exit_rcu(); \
+ run_irq_on_irqstack_cond(__##func, regs, vector); \
instrumentation_end(); \
irqentry_exit(regs, state); \
} \
\
-static __always_inline void __##func(struct pt_regs *regs, u8 vector)
+static noinline void __##func(struct pt_regs *regs, u32 vector)
/**
* DECLARE_IDTENTRY_SYSVEC - Declare functions for system vector entry points
@@ -237,10 +236,8 @@ __visible noinstr void func(struct pt_regs *regs) \
irqentry_state_t state = irqentry_enter(regs); \
\
instrumentation_begin(); \
- irq_enter_rcu(); \
kvm_set_cpu_l1tf_flush_l1d(); \
run_sysvec_on_irqstack_cond(__##func, regs); \
- irq_exit_rcu(); \
instrumentation_end(); \
irqentry_exit(regs, state); \
} \
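To make the control-flow change concrete, here is roughly what
DEFINE_IDTENTRY_IRQ(common_interrupt) expands to after this patch (a
hand-expanded sketch; common_interrupt is one real user of the macro):

static noinline void __common_interrupt(struct pt_regs *regs, u32 vector);

__visible noinstr void common_interrupt(struct pt_regs *regs,
					unsigned long error_code)
{
	irqentry_state_t state = irqentry_enter(regs);
	u32 vector = (u32)(u8)error_code;

	instrumentation_begin();
	kvm_set_cpu_l1tf_flush_l1d();
	/* irq_enter_rcu()/irq_exit_rcu() now happen inside this macro */
	run_irq_on_irqstack_cond(__common_interrupt, regs, vector);
	instrumentation_end();
	irqentry_exit(regs, state);
}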
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index a8c3d284fa46..95a448fbb44c 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -7,9 +7,12 @@
* Copyright (C) IBM Corporation, 2009
*/
+#include <asm/byteorder.h>
/* insn_attr_t is defined in inat.h */
#include <asm/inat.h>
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
+
struct insn_field {
union {
insn_value_t value;
@@ -20,6 +23,48 @@ struct insn_field {
unsigned char nbytes;
};
+static inline void insn_field_set(struct insn_field *p, insn_value_t v,
+ unsigned char n)
+{
+ p->value = v;
+ p->nbytes = n;
+}
+
+static inline void insn_set_byte(struct insn_field *p, unsigned char n,
+ insn_byte_t v)
+{
+ p->bytes[n] = v;
+}
+
+#else
+
+struct insn_field {
+ insn_value_t value;
+ union {
+ insn_value_t little;
+ insn_byte_t bytes[4];
+ };
+ /* !0 if we've run insn_get_xxx() for this field */
+ unsigned char got;
+ unsigned char nbytes;
+};
+
+static inline void insn_field_set(struct insn_field *p, insn_value_t v,
+ unsigned char n)
+{
+ p->value = v;
+ p->little = __cpu_to_le32(v);
+ p->nbytes = n;
+}
+
+static inline void insn_set_byte(struct insn_field *p, unsigned char n,
+ insn_byte_t v)
+{
+ p->bytes[n] = v;
+ p->value = __le32_to_cpu(p->little);
+}
+#endif
+
struct insn {
struct insn_field prefixes; /*
* Prefixes
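A small sketch of the invariant both insn_field variants preserve: bytes[]
always holds the little-endian instruction byte stream, while ->value reads
back in CPU order. The displacement bytes below are made-up example values.

static void example_fill_displacement(struct insn_field *f)
{
	/* Byte-wise fill while parsing the stream 78 56 34 12 */
	insn_set_byte(f, 0, 0x78);
	insn_set_byte(f, 1, 0x56);
	insn_set_byte(f, 2, 0x34);
	insn_set_byte(f, 3, 0x12);
	f->nbytes = 4;

	/*
	 * On little-endian the union aliasing does the work; on
	 * big-endian insn_set_byte() recomputes ->value via
	 * __le32_to_cpu(). Either way f->value is now 0x12345678.
	 */
}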
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index cf0e25f45422..c201083b34f6 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -1,15 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * intel-mid.h: Intel MID specific setup code
+ * Intel MID specific setup code
*
- * (C) Copyright 2009 Intel Corporation
+ * (C) Copyright 2009, 2021 Intel Corporation
*/
#ifndef _ASM_X86_INTEL_MID_H
#define _ASM_X86_INTEL_MID_H
-#include <linux/sfi.h>
#include <linux/pci.h>
-#include <linux/platform_device.h>
extern int intel_mid_pci_init(void);
extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
@@ -22,93 +20,18 @@ extern void intel_mid_pwr_power_off(void);
extern int intel_mid_pwr_get_lss_id(struct pci_dev *pdev);
-extern int get_gpio_by_name(const char *name);
-extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
-extern int __init sfi_parse_mtmr(struct sfi_table_header *table);
-extern int sfi_mrtc_num;
-extern struct sfi_rtc_table_entry sfi_mrtc_array[];
-
-/*
- * Here defines the array of devices platform data that IAFW would export
- * through SFI "DEVS" table, we use name and type to match the device and
- * its platform data.
- */
-struct devs_id {
- char name[SFI_NAME_LEN + 1];
- u8 type;
- u8 delay;
- u8 msic;
- void *(*get_platform_data)(void *info);
-};
-
-#define sfi_device(i) \
- static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \
- __section(".x86_intel_mid_dev.init") = &i
-
-/**
-* struct mid_sd_board_info - template for SD device creation
-* @name: identifies the driver
-* @bus_num: board-specific identifier for a given SD controller
-* @max_clk: the maximum frequency device supports
-* @platform_data: the particular data stored there is driver-specific
-*/
-struct mid_sd_board_info {
- char name[SFI_NAME_LEN];
- int bus_num;
- unsigned short addr;
- u32 max_clk;
- void *platform_data;
-};
-
-/*
- * Medfield is the follow-up of Moorestown, it combines two chip solution into
- * one. Other than that it also added always-on and constant tsc and lapic
- * timers. Medfield is the platform name, and the chip name is called Penwell
- * we treat Medfield/Penwell as a variant of Moorestown. Penwell can be
- * identified via MSRs.
- */
-enum intel_mid_cpu_type {
- /* 1 was Moorestown */
- INTEL_MID_CPU_CHIP_PENWELL = 2,
- INTEL_MID_CPU_CHIP_CLOVERVIEW,
- INTEL_MID_CPU_CHIP_TANGIER,
-};
-
-extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
-
#ifdef CONFIG_X86_INTEL_MID
-static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
-{
- return __intel_mid_cpu_chip;
-}
-
-static inline bool intel_mid_has_msic(void)
-{
- return (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL);
-}
-
extern void intel_scu_devices_create(void);
extern void intel_scu_devices_destroy(void);
#else /* !CONFIG_X86_INTEL_MID */
-#define intel_mid_identify_cpu() 0
-#define intel_mid_has_msic() 0
-
static inline void intel_scu_devices_create(void) { }
static inline void intel_scu_devices_destroy(void) { }
#endif /* !CONFIG_X86_INTEL_MID */
-enum intel_mid_timer_options {
- INTEL_MID_TIMER_DEFAULT,
- INTEL_MID_TIMER_APBT_ONLY,
- INTEL_MID_TIMER_LAPIC_APBT,
-};
-
-extern enum intel_mid_timer_options intel_mid_timer_options;
-
/* Bus Select SoC Fuse value */
#define BSEL_SOC_FUSE_MASK 0x7
/* FSB 133MHz */
@@ -118,16 +41,4 @@ extern enum intel_mid_timer_options intel_mid_timer_options;
/* FSB 83MHz */
#define BSEL_SOC_FUSE_111 0x7
-#define SFI_MTMR_MAX_NUM 8
-#define SFI_MRTC_MAX 8
-
-/* VRTC timer */
-#define MRST_VRTC_MAP_SZ 1024
-/* #define MRST_VRTC_PGOFFSET 0xc00 */
-
-extern void intel_mid_rtc_init(void);
-
-/* The offset for the mapping of global gpio pin to irq */
-#define INTEL_MID_IRQ_OFFSET 0x100
-
#endif /* _ASM_X86_INTEL_MID_H */
diff --git a/arch/x86/include/asm/intel_mid_vrtc.h b/arch/x86/include/asm/intel_mid_vrtc.h
deleted file mode 100644
index 0b44b1abe4d9..000000000000
--- a/arch/x86/include/asm/intel_mid_vrtc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _INTEL_MID_VRTC_H
-#define _INTEL_MID_VRTC_H
-
-extern unsigned char vrtc_cmos_read(unsigned char reg);
-extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
-extern void vrtc_get_time(struct timespec64 *now);
-extern int vrtc_set_mmss(const struct timespec64 *now);
-
-#endif
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h
index 11d457af68c5..8537f597d20a 100644
--- a/arch/x86/include/asm/intel_scu_ipc.h
+++ b/arch/x86/include/asm/intel_scu_ipc.h
@@ -65,6 +65,4 @@ static inline int intel_scu_ipc_dev_command(struct intel_scu_ipc_dev *scu, int c
inlen, out, outlen);
}
-#include <asm/intel_scu_ipc_legacy.h>
-
#endif
diff --git a/arch/x86/include/asm/intel_scu_ipc_legacy.h b/arch/x86/include/asm/intel_scu_ipc_legacy.h
deleted file mode 100644
index 4cf13fecb673..000000000000
--- a/arch/x86/include/asm/intel_scu_ipc_legacy.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_SCU_IPC_LEGACY_H_
-#define _ASM_X86_INTEL_SCU_IPC_LEGACY_H_
-
-#include <linux/notifier.h>
-
-#define IPCMSG_INDIRECT_READ 0x02
-#define IPCMSG_INDIRECT_WRITE 0x05
-
-#define IPCMSG_COLD_OFF 0x80 /* Only for Tangier */
-
-#define IPCMSG_WARM_RESET 0xF0
-#define IPCMSG_COLD_RESET 0xF1
-#define IPCMSG_SOFT_RESET 0xF2
-#define IPCMSG_COLD_BOOT 0xF3
-
-#define IPCMSG_VRTC 0xFA /* Set vRTC device */
-/* Command id associated with message IPCMSG_VRTC */
-#define IPC_CMD_VRTC_SETTIME 1 /* Set time */
-#define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */
-
-/* Don't call these in new code - they will be removed eventually */
-
-/* Read single register */
-static inline int intel_scu_ipc_ioread8(u16 addr, u8 *data)
-{
- return intel_scu_ipc_dev_ioread8(NULL, addr, data);
-}
-
-/* Read a vector */
-static inline int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
-{
- return intel_scu_ipc_dev_readv(NULL, addr, data, len);
-}
-
-/* Write single register */
-static inline int intel_scu_ipc_iowrite8(u16 addr, u8 data)
-{
- return intel_scu_ipc_dev_iowrite8(NULL, addr, data);
-}
-
-/* Write a vector */
-static inline int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
-{
- return intel_scu_ipc_dev_writev(NULL, addr, data, len);
-}
-
-/* Update single register based on the mask */
-static inline int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask)
-{
- return intel_scu_ipc_dev_update(NULL, addr, data, mask);
-}
-
-/* Issue commands to the SCU with or without data */
-static inline int intel_scu_ipc_simple_command(int cmd, int sub)
-{
- return intel_scu_ipc_dev_simple_command(NULL, cmd, sub);
-}
-
-static inline int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
- u32 *out, int outlen)
-{
- /* New API takes both inlen and outlen as bytes so convert here */
- size_t inbytes = inlen * sizeof(u32);
- size_t outbytes = outlen * sizeof(u32);
-
- return intel_scu_ipc_dev_command_with_size(NULL, cmd, sub, in, inbytes,
- inlen, out, outbytes);
-}
-
-extern struct blocking_notifier_head intel_scu_notifier;
-
-static inline void intel_scu_notifier_add(struct notifier_block *nb)
-{
- blocking_notifier_chain_register(&intel_scu_notifier, nb);
-}
-
-static inline void intel_scu_notifier_remove(struct notifier_block *nb)
-{
- blocking_notifier_chain_unregister(&intel_scu_notifier, nb);
-}
-
-static inline int intel_scu_notifier_post(unsigned long v, void *p)
-{
- return blocking_notifier_call_chain(&intel_scu_notifier, v, p);
-}
-
-#define SCU_AVAILABLE 1
-#define SCU_DOWN 2
-
-#endif
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 76d389691b5b..768aa234cbb4 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -25,8 +25,6 @@ static inline int irq_canonicalize(int irq)
extern int irq_init_percpu_irqstack(unsigned int cpu);
-#define __ARCH_HAS_DO_SOFTIRQ
-
struct irq_desc;
extern void fixup_irqs(void);
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index 775816965c6a..9b2a0ff76c73 100644
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -7,100 +7,217 @@
#include <asm/processor.h>
#ifdef CONFIG_X86_64
-static __always_inline bool irqstack_active(void)
-{
- return __this_cpu_read(irq_count) != -1;
-}
-
-void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
-void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
- struct pt_regs *regs);
-void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
- struct irq_desc *desc);
-static __always_inline void __run_on_irqstack(void (*func)(void))
-{
- void *tos = __this_cpu_read(hardirq_stack_ptr);
-
- __this_cpu_add(irq_count, 1);
- asm_call_on_stack(tos - 8, func, NULL);
- __this_cpu_sub(irq_count, 1);
+/*
+ * Macro to inline switching to an interrupt stack and invoking function
+ * calls from there. The following rules apply:
+ *
+ * - Ordering:
+ *
+ * 1. Write the stack pointer into the topmost place of the irq
+ * stack. This ensures that the various unwinders can link back to the
+ * original stack.
+ *
+ * 2. Switch the stack pointer to the top of the irq stack.
+ *
+ * 3. Invoke whatever needs to be done (@asm_call argument)
+ *
+ * 4. Pop the original stack pointer from the top of the irq stack
+ * which brings it back to the original stack where it left off.
+ *
+ * - Function invocation:
+ *
+ * To allow flexible usage of the macro, the actual function code including
+ * the store of the arguments in the call ABI registers is handed in via
+ * the @asm_call argument.
+ *
+ * - Local variables:
+ *
+ * @tos:
+ * The @tos variable holds a pointer to the top of the irq stack and
+ * _must_ be allocated in a non-callee saved register as this is a
+ * restriction coming from objtool.
+ *
+ * Note that (tos) is in both the input and output constraints to ensure
+ * that the compiler does not assume that R11 is left untouched in case
+ * this macro is used in some place where the per-CPU interrupt stack
+ * pointer is used again afterwards.
+ *
+ * - Function arguments:
+ * The function argument(s), if any, have to be defined in register
+ * variables at the place where this is invoked. Storing the
+ * argument(s) in the proper register(s) is part of the @asm_call
+ *
+ * - Constraints:
+ *
+ * The constraints have to be done very carefully because the compiler
+ * does not know about the assembly call.
+ *
+ * output:
+ * As documented already above the @tos variable is required to be in
+ * the output constraints to make the compiler aware that R11 cannot be
+ * reused after the asm() statement.
+ *
+ * For builds with CONFIG_UNWINDER_FRAME_POINTER, ASM_CALL_CONSTRAINT is
+ * required as well as this prevents certain creative GCC variants from
+ * misplacing the ASM code.
+ *
+ * input:
+ * - func:
+ * Immediate, which tells the compiler that the function is referenced.
+ *
+ * - tos:
+ * Register. The actual register is defined by the variable declaration.
+ *
+ * - function arguments:
+ * The constraints are handed in via the 'argconstr' argument list. They
+ * describe the register arguments which are used in @asm_call.
+ *
+ * clobbers:
+ * Function calls can clobber anything except the callee-saved
+ * registers. Tell the compiler.
+ */
+#define call_on_irqstack(func, asm_call, argconstr...) \
+{ \
+ register void *tos asm("r11"); \
+ \
+ tos = ((void *)__this_cpu_read(hardirq_stack_ptr)); \
+ \
+ asm_inline volatile( \
+ "movq %%rsp, (%[tos]) \n" \
+ "movq %[tos], %%rsp \n" \
+ \
+ asm_call \
+ \
+ "popq %%rsp \n" \
+ \
+ : "+r" (tos), ASM_CALL_CONSTRAINT \
+ : [__func] "i" (func), [tos] "r" (tos) argconstr \
+ : "cc", "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", \
+ "memory" \
+ ); \
}
-static __always_inline void
-__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
- struct pt_regs *regs)
-{
- void *tos = __this_cpu_read(hardirq_stack_ptr);
-
- __this_cpu_add(irq_count, 1);
- asm_call_sysvec_on_stack(tos - 8, func, regs);
- __this_cpu_sub(irq_count, 1);
+/* Macros to assert type correctness for run_*_on_irqstack macros */
+#define assert_function_type(func, proto) \
+ static_assert(__builtin_types_compatible_p(typeof(&func), proto))
+
+#define assert_arg_type(arg, proto) \
+ static_assert(__builtin_types_compatible_p(typeof(arg), proto))
+
+/*
+ * Macro to invoke system vector and device interrupt C handlers.
+ */
+#define call_on_irqstack_cond(func, regs, asm_call, constr, c_args...) \
+{ \
+ /* \
+ * User mode entry and interrupt on the irq stack do not \
+ * switch stacks. If from user mode the task stack is empty. \
+ */ \
+ if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) { \
+ irq_enter_rcu(); \
+ func(c_args); \
+ irq_exit_rcu(); \
+ } else { \
+ /* \
+ * Mark the irq stack inuse _before_ and unmark _after_ \
+ * switching stacks. Interrupts are disabled in both \
+ * places. Invoke the stack switch macro with the call \
+ * sequence which matches the above direct invocation. \
+ */ \
+ __this_cpu_write(hardirq_stack_inuse, true); \
+ call_on_irqstack(func, asm_call, constr); \
+ __this_cpu_write(hardirq_stack_inuse, false); \
+ } \
}
-static __always_inline void
-__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
- struct irq_desc *desc)
-{
- void *tos = __this_cpu_read(hardirq_stack_ptr);
-
- __this_cpu_add(irq_count, 1);
- asm_call_irq_on_stack(tos - 8, func, desc);
- __this_cpu_sub(irq_count, 1);
+/*
+ * Function call sequence for call_on_irqstack() for system vectors.
+ *
+ * Note that irq_enter_rcu() and irq_exit_rcu() do not use the input
+ * mechanism because these functions are global and cannot be optimized out
+ * when compiling a particular source file which uses one of these macros.
+ *
+ * The argument (regs) does not need to be pushed or stashed in a callee
+ * saved register to be safe vs. the irq_enter_rcu() call because the
+ * clobbers already prevent the compiler from storing it in a callee
+ * clobbered register. As the compiler has to preserve @regs for the final
+ * call to idtentry_exit() anyway, it's likely that it does not cause extra
+ * effort for this asm magic.
+ */
+#define ASM_CALL_SYSVEC \
+ "call irq_enter_rcu \n" \
+ "movq %[arg1], %%rdi \n" \
+ "call %P[__func] \n" \
+ "call irq_exit_rcu \n"
+
+#define SYSVEC_CONSTRAINTS , [arg1] "r" (regs)
+
+#define run_sysvec_on_irqstack_cond(func, regs) \
+{ \
+ assert_function_type(func, void (*)(struct pt_regs *)); \
+ assert_arg_type(regs, struct pt_regs *); \
+ \
+ call_on_irqstack_cond(func, regs, ASM_CALL_SYSVEC, \
+ SYSVEC_CONSTRAINTS, regs); \
}
-#else /* CONFIG_X86_64 */
-static inline bool irqstack_active(void) { return false; }
-static inline void __run_on_irqstack(void (*func)(void)) { }
-static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
- struct pt_regs *regs) { }
-static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
- struct irq_desc *desc) { }
-#endif /* !CONFIG_X86_64 */
-
-static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
-{
- if (IS_ENABLED(CONFIG_X86_32))
- return false;
- if (!regs)
- return !irqstack_active();
- return !user_mode(regs) && !irqstack_active();
+/*
+ * As in ASM_CALL_SYSVEC above the clobbers force the compiler to store
+ * @regs and @vector in callee saved registers.
+ */
+#define ASM_CALL_IRQ \
+ "call irq_enter_rcu \n" \
+ "movq %[arg1], %%rdi \n" \
+ "movl %[arg2], %%esi \n" \
+ "call %P[__func] \n" \
+ "call irq_exit_rcu \n"
+
+#define IRQ_CONSTRAINTS , [arg1] "r" (regs), [arg2] "r" (vector)
+
+#define run_irq_on_irqstack_cond(func, regs, vector) \
+{ \
+ assert_function_type(func, void (*)(struct pt_regs *, u32)); \
+ assert_arg_type(regs, struct pt_regs *); \
+ assert_arg_type(vector, u32); \
+ \
+ call_on_irqstack_cond(func, regs, ASM_CALL_IRQ, \
+ IRQ_CONSTRAINTS, regs, vector); \
}
-
-static __always_inline void run_on_irqstack_cond(void (*func)(void),
- struct pt_regs *regs)
-{
- lockdep_assert_irqs_disabled();
-
- if (irq_needs_irq_stack(regs))
- __run_on_irqstack(func);
- else
- func();
+#define ASM_CALL_SOFTIRQ \
+ "call %P[__func] \n"
+
+/*
+ * Macro to invoke __do_softirq on the irq stack. This is only called from
+ * task context when bottom halves are about to be reenabled and soft
+ * interrupts are pending to be processed. The interrupt stack cannot be in
+ * use here.
+ */
+#define do_softirq_own_stack() \
+{ \
+ __this_cpu_write(hardirq_stack_inuse, true); \
+ call_on_irqstack(__do_softirq, ASM_CALL_SOFTIRQ); \
+ __this_cpu_write(hardirq_stack_inuse, false); \
}
-static __always_inline void
-run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs),
- struct pt_regs *regs)
-{
- lockdep_assert_irqs_disabled();
-
- if (irq_needs_irq_stack(regs))
- __run_sysvec_on_irqstack(func, regs);
- else
- func(regs);
+#else /* CONFIG_X86_64 */
+/* System vector handlers always run on the stack they interrupted. */
+#define run_sysvec_on_irqstack_cond(func, regs) \
+{ \
+ irq_enter_rcu(); \
+ func(regs); \
+ irq_exit_rcu(); \
}
-static __always_inline void
-run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc,
- struct pt_regs *regs)
-{
- lockdep_assert_irqs_disabled();
-
- if (irq_needs_irq_stack(regs))
- __run_irq_on_irqstack(func, desc);
- else
- func(desc);
+/* Switches to the irq stack within func() */
+#define run_irq_on_irqstack_cond(func, regs, vector) \
+{ \
+ irq_enter_rcu(); \
+ func(regs, vector); \
+ irq_exit_rcu(); \
}
+#endif /* !CONFIG_X86_64 */
+
#endif
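For reference, hand-expanding call_on_irqstack() with ASM_CALL_SYSVEC on
the stack-switch path yields roughly this instruction sequence (a sketch
derived from the macros above, not additional code in the patch):

/*
 *	movq	%rsp, (%r11)	# save task SP at the top of the irq stack
 *	movq	%r11, %rsp	# switch to the irq stack
 *	call	irq_enter_rcu
 *	movq	<regs>, %rdi	# first argument per the call ABI
 *	call	<func>
 *	call	irq_exit_rcu
 *	popq	%rsp		# restore task SP, back on the task stack
 */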
diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h
new file mode 100644
index 000000000000..97bbb4a9083a
--- /dev/null
+++ b/arch/x86/include/asm/kfence.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * x86 KFENCE support.
+ *
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#ifndef _ASM_X86_KFENCE_H
+#define _ASM_X86_KFENCE_H
+
+#include <linux/bug.h>
+#include <linux/kfence.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/set_memory.h>
+#include <asm/tlbflush.h>
+
+/* Force 4K pages for __kfence_pool. */
+static inline bool arch_kfence_init_pool(void)
+{
+ unsigned long addr;
+
+ for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
+ addr += PAGE_SIZE) {
+ unsigned int level;
+
+ if (!lookup_address(addr, &level))
+ return false;
+
+ if (level != PG_LEVEL_4K)
+ set_memory_4k(addr, 1);
+ }
+
+ return true;
+}
+
+/* Protect the given page and flush TLB. */
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ unsigned int level;
+ pte_t *pte = lookup_address(addr, &level);
+
+ if (WARN_ON(!pte || level != PG_LEVEL_4K))
+ return false;
+
+ /*
+ * We need to avoid IPIs, as we may get KFENCE allocations or faults
+ * with interrupts disabled. Therefore, the below is best-effort, and
+ * does not flush TLBs on all CPUs. We can tolerate some inaccuracy;
+ * lazy fault handling takes care of faults after the page is PRESENT.
+ */
+
+ if (protect)
+ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+ else
+ set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+
+ /* Flush this CPU's TLB. */
+ flush_tlb_one_kernel(addr);
+ return true;
+}
+
+#endif /* _ASM_X86_KFENCE_H */
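A hypothetical guard/unguard cycle showing what the present-bit toggle
buys; the real call sites are in the KFENCE core, not in this header:

static void example_guard_cycle(unsigned long page_addr)
{
	/* Clear _PAGE_PRESENT: loads/stores to page_addr now fault */
	kfence_protect_page(page_addr, true);

	/* ... any access here traps into the page fault handler ... */

	/* Set _PAGE_PRESENT again; stale remote TLBs fault lazily */
	kfence_protect_page(page_addr, false);
}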
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 355a2ab8fc09..323641097f63 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -93,11 +93,7 @@ KVM_X86_OP(check_intercept)
KVM_X86_OP(handle_exit_irqoff)
KVM_X86_OP_NULL(request_immediate_exit)
KVM_X86_OP(sched_in)
-KVM_X86_OP_NULL(slot_enable_log_dirty)
-KVM_X86_OP_NULL(slot_disable_log_dirty)
-KVM_X86_OP_NULL(flush_log_dirty)
-KVM_X86_OP_NULL(enable_log_dirty_pt_masked)
-KVM_X86_OP_NULL(cpu_dirty_log_size)
+KVM_X86_OP_NULL(update_cpu_dirty_logging)
KVM_X86_OP_NULL(pre_block)
KVM_X86_OP_NULL(post_block)
KVM_X86_OP_NULL(vcpu_blocking)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 84499aad01a4..0cf71ff2b2e5 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -89,6 +89,8 @@
KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
#define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
+#define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
+ KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define CR0_RESERVED_BITS \
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -1007,6 +1009,7 @@ struct kvm_arch {
u32 bsp_vcpu_id;
u64 disabled_quirks;
+ int cpu_dirty_logging_count;
enum kvm_irqchip_mode irqchip_mode;
u8 nr_reserved_ioapic_pins;
@@ -1271,30 +1274,11 @@ struct kvm_x86_ops {
void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
/*
- * Arch-specific dirty logging hooks. These hooks are only supposed to
- * be valid if the specific arch has hardware-accelerated dirty logging
- * mechanism. Currently only for PML on VMX.
- *
- * - slot_enable_log_dirty:
- * called when enabling log dirty mode for the slot.
- * - slot_disable_log_dirty:
- * called when disabling log dirty mode for the slot.
- * also called when slot is created with log dirty disabled.
- * - flush_log_dirty:
- * called before reporting dirty_bitmap to userspace.
- * - enable_log_dirty_pt_masked:
- * called when reenabling log dirty for the GFNs in the mask after
- * corresponding bits are cleared in slot->dirty_bitmap.
+ * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer. A zero
+ * value indicates CPU dirty logging is unsupported or disabled.
*/
- void (*slot_enable_log_dirty)(struct kvm *kvm,
- struct kvm_memory_slot *slot);
- void (*slot_disable_log_dirty)(struct kvm *kvm,
- struct kvm_memory_slot *slot);
- void (*flush_log_dirty)(struct kvm *kvm);
- void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- gfn_t offset, unsigned long mask);
- int (*cpu_dirty_log_size)(void);
+ int cpu_dirty_log_size;
+ void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);
/* pmu operations of sub-arch */
const struct kvm_pmu_ops *pmu_ops;
@@ -1437,11 +1421,6 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot);
-void kvm_mmu_slot_set_dirty(struct kvm *kvm,
- struct kvm_memory_slot *memslot);
-void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
@@ -1613,7 +1592,6 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu);
void kvm_update_dr7(struct kvm_vcpu *vcpu);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
-int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
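For context, a sketch of how a vendor module might populate the
replacement hook. The function body is elided and the PML buffer size of
512 entries is an assumption for illustration, not part of this header
change:

static void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
{
	/*
	 * Toggle PML for this vCPU based on
	 * vcpu->kvm->arch.cpu_dirty_logging_count.
	 */
}

static struct kvm_x86_ops vmx_x86_ops __initdata = {
	/* ... */
	.cpu_dirty_log_size = 512,	/* assumed PML buffer entries */
	.update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
};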
diff --git a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h
index fdbffec4cfde..5a2baf28a1dc 100644
--- a/arch/x86/include/asm/orc_types.h
+++ b/arch/x86/include/asm/orc_types.h
@@ -40,6 +40,8 @@
#define ORC_REG_MAX 15
#ifndef __ASSEMBLY__
+#include <asm/byteorder.h>
+
/*
* This struct is more or less a vastly simplified version of the DWARF Call
* Frame Information standard. It contains only the necessary parts of DWARF
@@ -51,10 +53,18 @@
struct orc_entry {
s16 sp_offset;
s16 bp_offset;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned sp_reg:4;
unsigned bp_reg:4;
unsigned type:2;
unsigned end:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ unsigned bp_reg:4;
+ unsigned sp_reg:4;
+ unsigned unused:5;
+ unsigned end:1;
+ unsigned type:2;
+#endif
} __packed;
#endif /* __ASSEMBLY__ */
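Because the struct is __packed, both bitfield orders must yield the same
6-byte layout; a quick sanity sketch (the size follows from the fields
above: two s16 members plus two bytes of bitfields):

#include <linux/build_bug.h>

static_assert(sizeof(struct orc_entry) == 6,
	      "orc_entry layout must match on both endiannesses");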
diff --git a/arch/x86/include/asm/platform_sst_audio.h b/arch/x86/include/asm/platform_sst_audio.h
index 16b9f220bdeb..40f92270515b 100644
--- a/arch/x86/include/asm/platform_sst_audio.h
+++ b/arch/x86/include/asm/platform_sst_audio.h
@@ -10,8 +10,6 @@
#ifndef _PLATFORM_SST_AUDIO_H_
#define _PLATFORM_SST_AUDIO_H_
-#include <linux/sfi.h>
-
#define MAX_NUM_STREAMS_MRFLD 25
#define MAX_NUM_STREAMS MAX_NUM_STREAMS_MRFLD
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c20a52b5534b..dc6d149bf851 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -426,8 +426,6 @@ struct irq_stack {
char stack[IRQ_STACK_SIZE];
} __aligned(IRQ_STACK_SIZE);
-DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
-
#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#else
@@ -454,7 +452,8 @@ static inline unsigned long cpu_kernelmode_gs_base(int cpu)
return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
}
-DECLARE_PER_CPU(unsigned int, irq_count);
+DECLARE_PER_CPU(void *, hardirq_stack_ptr);
+DECLARE_PER_CPU(bool, hardirq_stack_inuse);
extern asmlinkage void ignore_sysret(void);
/* Save actual FS/GS selectors and bases to current->thread */
@@ -473,9 +472,9 @@ struct stack_canary {
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
-/* Per CPU softirq stack pointer */
+DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
-#endif /* X86_64 */
+#endif /* !X86_64 */
extern unsigned int fpu_kernel_xstate_size;
extern unsigned int fpu_user_xstate_size;
diff --git a/arch/x86/include/asm/softirq_stack.h b/arch/x86/include/asm/softirq_stack.h
new file mode 100644
index 000000000000..889d53d6a0e1
--- /dev/null
+++ b/arch/x86/include/asm/softirq_stack.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_SOFTIRQ_STACK_H
+#define _ASM_X86_SOFTIRQ_STACK_H
+
+#ifdef CONFIG_X86_64
+# include <asm/irq_stack.h>
+#else
+# include <asm-generic/softirq_stack.h>
+#endif
+
+#endif
diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
index 664d4610d700..8e574c0afef8 100644
--- a/arch/x86/include/asm/unwind_hints.h
+++ b/arch/x86/include/asm/unwind_hints.h
@@ -48,17 +48,8 @@
UNWIND_HINT_REGS base=\base offset=\offset partial=1
.endm
-.macro UNWIND_HINT_FUNC sp_offset=8
- UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=\sp_offset type=UNWIND_HINT_TYPE_CALL
-.endm
-
-/*
- * RET_OFFSET: Used on instructions that terminate a function; mostly RETURN
- * and sibling calls. On these, sp_offset denotes the expected offset from
- * initial_func_cfi.
- */
-.macro UNWIND_HINT_RET_OFFSET sp_offset=8
- UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_RET_OFFSET sp_offset=\sp_offset
+.macro UNWIND_HINT_FUNC
+ UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=8 type=UNWIND_HINT_TYPE_FUNC
.endm
#endif /* __ASSEMBLY__ */