Diffstat (limited to 'arch')
-rw-r--r--  arch/frv/include/asm/processor.h           |  3
-rw-r--r--  arch/ia64/include/asm/unistd.h             |  2
-rw-r--r--  arch/ia64/include/uapi/asm/unistd.h        |  1
-rw-r--r--  arch/ia64/kernel/entry.S                   |  1
-rw-r--r--  arch/microblaze/include/uapi/asm/unistd.h  |  3
-rw-r--r--  arch/microblaze/kernel/syscall_table.S     |  3
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c       |  6
-rw-r--r--  arch/x86/include/asm/kvm_host.h            |  2
-rw-r--r--  arch/x86/kvm/emulate.c                     | 11
9 files changed, 19 insertions, 13 deletions
diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h
index a34f309e5801..6554e78893f2 100644
--- a/arch/frv/include/asm/processor.h
+++ b/arch/frv/include/asm/processor.h
@@ -129,7 +129,8 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_EIP(tsk)   ((tsk)->thread.frame0->pc)
 #define KSTK_ESP(tsk)   ((tsk)->thread.frame0->sp)
 
-#define cpu_relax() barrier()
+#define cpu_relax()     barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 /* data cache prefetch */
 #define ARCH_HAS_PREFETCH
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 4254f5d3218c..10a14ead70b9 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls            316 /* length of syscall table */
+#define NR_syscalls            317 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 99801c3be914..6a65bb7d0657 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -329,5 +329,6 @@
 #define __NR_sched_getattr             1337
 #define __NR_renameat2                 1338
 #define __NR_getrandom                 1339
+#define __NR_memfd_create              1340
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 4c13837a9269..01edf242eb29 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1777,6 +1777,7 @@ sys_call_table:
        data8 sys_sched_getattr
        data8 sys_renameat2
        data8 sys_getrandom
+       data8 sys_memfd_create                  // 1340
 
        .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h
index 4e1ddc930a68..1c2380bf8fe6 100644
--- a/arch/microblaze/include/uapi/asm/unistd.h
+++ b/arch/microblaze/include/uapi/asm/unistd.h
@@ -399,5 +399,8 @@
 #define __NR_sched_setattr     381
 #define __NR_sched_getattr     382
 #define __NR_renameat2         383
+#define __NR_seccomp           384
+#define __NR_getrandom         385
+#define __NR_memfd_create      386
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 1a23d5d5480c..de59ee1d7010 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -384,3 +384,6 @@ ENTRY(sys_call_table)
        .long sys_sched_setattr
        .long sys_sched_getattr
        .long sys_renameat2
+       .long sys_seccomp
+       .long sys_getrandom     /* 385 */
+       .long sys_memfd_create
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 329d7fdd0a6a..b9615ba5b083 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -101,7 +101,7 @@ struct kvm_rma_info *kvm_alloc_rma()
        ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
        if (!ri)
                return NULL;
-       page = cma_alloc(kvm_cma, kvm_rma_pages, get_order(kvm_rma_pages));
+       page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
        if (!page)
                goto err_out;
        atomic_set(&ri->use_count, 1);
@@ -135,12 +135,12 @@ struct page *kvm_alloc_hpt(unsigned long nr_pages)
 {
        unsigned long align_pages = HPT_ALIGN_PAGES;
 
-       VM_BUG_ON(get_order(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+       VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
        /* Old CPUs require HPT aligned on a multiple of its size */
        if (!cpu_has_feature(CPU_FTR_ARCH_206))
                align_pages = nr_pages;
-       return cma_alloc(kvm_cma, nr_pages, get_order(align_pages));
+       return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 572460175ba5..7c492ed9087b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -95,7 +95,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 80
 #define KVM_NR_FIXED_MTRR_REGION 88
-#define KVM_NR_VAR_MTRR 10
+#define KVM_NR_VAR_MTRR 8
 
 #define ASYNC_PF_PER_VCPU 64
 
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 56657b0bb3bb..03954f7900f5 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1491,9 +1491,6 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                        goto exception;
                break;
        case VCPU_SREG_CS:
-               if (in_task_switch && rpl != dpl)
-                       goto exception;
-
                if (!(seg_desc.type & 8))
                        goto exception;
 
@@ -4394,8 +4391,11 @@ done_prefixes:
 
        ctxt->execute = opcode.u.execute;
 
+       if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
+               return EMULATION_FAILED;
+
        if (unlikely(ctxt->d &
-           (NotImpl|EmulateOnUD|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
+           (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
                /*
                 * These are copied unconditionally here, and checked unconditionally
                 * in x86_emulate_insn.
@@ -4406,9 +4406,6 @@ done_prefixes:
                if (ctxt->d & NotImpl)
                        return EMULATION_FAILED;
 
-               if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
-                       return EMULATION_FAILED;
-
                if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
                        ctxt->op_bytes = 8;
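
Note: the ia64 and microblaze hunks above only wire new numbers into the syscall tables (among them __NR_memfd_create, 1340 on ia64 and 386 on microblaze). As a rough illustration of what those entries expose to user space, here is a hedged sketch that calls memfd_create(2) through the raw syscall interface; the fallback number in the #ifndef branch is taken from the ia64 hunk and is only correct on that architecture, everything else is ordinary Linux userspace API.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_memfd_create
/* Assumption: ia64 value from the hunk above; microblaze uses 386,
 * other architectures define their own number in <sys/syscall.h>. */
#define __NR_memfd_create 1340
#endif

int main(void)
{
	/* memfd_create(name, flags): the name only shows up in /proc for debugging. */
	int fd = syscall(__NR_memfd_create, "example", 0);
	if (fd < 0) {
		perror("memfd_create");
		return 1;
	}

	/* The descriptor behaves like an unlinked tmpfs file. */
	if (ftruncate(fd, 4096) == 0)
		printf("memfd created, fd=%d, sized to 4096 bytes\n", fd);

	close(fd);
	return 0;
}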
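Note: the two powerpc hunks swap get_order() for order_base_2() in the cma_alloc() calls. In the kernel, get_order() converts a size in bytes into a page order, while cma_alloc()'s last argument is an alignment order for a quantity that is already a page count, so order_base_2() (ceil(log2) of the count) is the appropriate conversion. The sketch below is a standalone userspace model of the two helpers, not the kernel implementation; the PAGE_SHIFT value and the example page count are assumptions chosen only to make the difference visible.

#include <stdio.h>

#define PAGE_SHIFT 16                      /* assumption: 64 KiB pages, as on ppc64 */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Model of get_order(): order of pages needed to cover 'size' BYTES. */
static int get_order(unsigned long size)
{
	unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

/* Model of order_base_2(): ceil(log2(n)) of a plain count. */
static int order_base_2(unsigned long n)
{
	int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long kvm_rma_pages = 2048;    /* assumption: example page count */

	/* cma_alloc() wants an order in PAGES.  Feeding a page count to
	 * get_order() treats it as a byte count and collapses to order 0,
	 * which is the under-alignment these hunks fix. */
	printf("get_order(%lu)    = %d (count treated as bytes)\n",
	       kvm_rma_pages, get_order(kvm_rma_pages));
	printf("order_base_2(%lu) = %d (alignment order in pages)\n",
	       kvm_rma_pages, order_base_2(kvm_rma_pages));
	return 0;
}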