From ebc97a52b5d6cd5fb0c15a3fc9cdd6eb924646a1 Mon Sep 17 00:00:00 2001
From: Yosry Ahmed
Date: Tue, 23 Aug 2022 00:46:36 +0000
Subject: mm: add NR_SECONDARY_PAGETABLE to count secondary page table uses.

We keep track of several kernel memory stats (total kernel memory, page
tables, stack, vmalloc, etc) on multiple levels (global, per-node,
per-memcg, etc). These stats give users insight into how much memory is
used by the kernel and for what purposes.

Currently, memory used by the KVM mmu is not accounted in any of those
kernel memory stats. This patch series accounts the memory pages used by
KVM for page tables in those stats in a new NR_SECONDARY_PAGETABLE stat.
This stat can later be extended to account for other types of secondary
page tables (e.g. iommu page tables).

KVM has a decent number of large allocations that aren't for page tables,
but for most of them, the number/size of those allocations scales linearly
with either the number of vCPUs or the amount of memory assigned to the
VM. KVM's secondary page table allocations do not scale linearly,
especially when nested virtualization is in use.

From a KVM perspective, NR_SECONDARY_PAGETABLE will scale with KVM's
per-VM pages_{4k,2m,1g} stats unless the guest is doing something bizarre
(e.g. accessing only 4kb chunks of 2mb pages so that KVM is forced to
allocate a large number of page tables even though the guest isn't
accessing that much memory). However, someone would need to either
understand how KVM works to make that connection, or know (or be told) to
go look at KVM's stats if they're running VMs, to better decipher the
stats.

Furthermore, having NR_PAGETABLE side-by-side with NR_SECONDARY_PAGETABLE
is informative. For example, when backing a VM with THP vs. HugeTLB,
NR_SECONDARY_PAGETABLE is roughly the same, but NR_PAGETABLE is an order
of magnitude higher with THP. So having this stat will at the very least
prove useful for understanding tradeoffs between VM backing types, and
likely even steer folks towards potential optimizations.

The original discussion with more details about the rationale:
https://lore.kernel.org/all/87ilqoi77b.wl-maz@kernel.org

This stat will be used by subsequent patches to count KVM mmu
memory usage.

Signed-off-by: Yosry Ahmed
Acked-by: Shakeel Butt
Acked-by: Marc Zyngier
Link: https://lore.kernel.org/r/20220823004639.2387269-2-yosryahmed@google.com
Signed-off-by: Sean Christopherson
---
 include/linux/mmzone.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux')

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e24b40c52468..355d842d2731 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -216,6 +216,7 @@ enum node_stat_item {
 	NR_KERNEL_SCS_KB,	/* measured in KiB */
 #endif
 	NR_PAGETABLE,		/* used for pagetables */
+	NR_SECONDARY_PAGETABLE, /* secondary pagetables, e.g. KVM pagetables */
 #ifdef CONFIG_SWAP
 	NR_SWAPCACHE,
 #endif
--
cgit v1.2.3
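With the series applied, the new counter surfaces as "SecPageTables" in
/proc/meminfo, next to the existing "PageTables" field (per-node and
per-memcg views come from the same stat plumbing). A minimal userspace
sketch for comparing the two fields side by side; this helper is an
editorial illustration, not part of the series:

#include <stdio.h>
#include <string.h>

/* Illustrative sketch: read one field from /proc/meminfo, in kB. */
static long read_meminfo_kb(const char *field)
{
	char line[256];
	long kb = -1;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return -1;

	while (fgets(line, sizeof(line), f)) {
		size_t len = strlen(field);

		if (!strncmp(line, field, len) && line[len] == ':') {
			sscanf(line + len + 1, "%ld", &kb);
			break;
		}
	}
	fclose(f);
	return kb;
}

int main(void)
{
	/* Compare the two stats side by side, per the commit message. */
	printf("PageTables:    %ld kB\n", read_meminfo_kb("PageTables"));
	printf("SecPageTables: %ld kB\n", read_meminfo_kb("SecPageTables"));
	return 0;
}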
From 43a063cab325ee7cc50349967e536b3cd4e57f03 Mon Sep 17 00:00:00 2001
From: Yosry Ahmed
Date: Tue, 23 Aug 2022 00:46:37 +0000
Subject: KVM: x86/mmu: count KVM mmu usage in secondary pagetable stats.

Count the pages used by the KVM mmu on x86 in the memory stats, under the
secondary pagetable stats (e.g. "SecPageTables" in /proc/meminfo), to
give better visibility into the memory consumption of the KVM mmu, in a
similar way to how normal user page tables are accounted.

Add the inner helper to common KVM code; ARM will also use it to count
stats in a future commit.

Signed-off-by: Yosry Ahmed
Reviewed-by: Sean Christopherson
Acked-by: Marc Zyngier # generic KVM changes
Link: https://lore.kernel.org/r/20220823004639.2387269-3-yosryahmed@google.com
Link: https://lore.kernel.org/r/20220823004639.2387269-4-yosryahmed@google.com
[sean: squash x86 usage to work around modpost issues]
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/mmu/mmu.c     | 16 ++++++++++++++--
 arch/x86/kvm/mmu/tdp_mmu.c | 12 ++++++++++++
 include/linux/kvm_host.h   | 13 +++++++++++++
 3 files changed, 39 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d25d55b1f0b5..32b60a6b83bd 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1665,6 +1665,18 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
 }
 
+static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	kvm_mod_used_mmu_pages(kvm, +1);
+	kvm_account_pgtable_pages((void *)sp->spt, +1);
+}
+
+static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	kvm_mod_used_mmu_pages(kvm, -1);
+	kvm_account_pgtable_pages((void *)sp->spt, -1);
+}
+
 static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
 {
 	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
@@ -2122,7 +2134,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
 	 */
 	sp->mmu_valid_gen = kvm->arch.mmu_valid_gen;
 	list_add(&sp->link, &kvm->arch.active_mmu_pages);
-	kvm_mod_used_mmu_pages(kvm, +1);
+	kvm_account_mmu_page(kvm, sp);
 
 	sp->gfn = gfn;
 	sp->role = role;
@@ -2456,7 +2468,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
 			list_add(&sp->link, invalid_list);
 		else
 			list_move(&sp->link, invalid_list);
-		kvm_mod_used_mmu_pages(kvm, -1);
+		kvm_unaccount_mmu_page(kvm, sp);
 	} else {
 		/*
 		 * Remove the active root from the active page list, the root
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index bf2ccf9debca..672f0432d777 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -372,6 +372,16 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
 	}
 }
 
+static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	kvm_account_pgtable_pages((void *)sp->spt, +1);
+}
+
+static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	kvm_account_pgtable_pages((void *)sp->spt, -1);
+}
+
 /**
  * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
  *
@@ -384,6 +394,7 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
 			      bool shared)
 {
+	tdp_unaccount_mmu_page(kvm, sp);
 	if (shared)
 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
 	else
@@ -1132,6 +1143,7 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
 	if (account_nx)
 		account_huge_nx_page(kvm, sp);
 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
+	tdp_account_mmu_page(kvm, sp);
 
 	return 0;
 }
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f4519d3689e1..04c7e5f2f727 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2247,6 +2247,19 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
 }
 #endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
 
+/*
+ * If more than one page is being (un)accounted, @virt must be the address of
+ * the first page of a block of pages that were allocated together (i.e.
+ * accounted together).
+ *
+ * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
+ * is thread-safe.
+ */
+static inline void kvm_account_pgtable_pages(void *virt, int nr)
+{
+	mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
+}
+
 /*
  * This defines how many reserved entries we want to keep before we
  * kick the vcpu to the userspace to avoid dirty ring full.  This
--
cgit v1.2.3
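kvm_account_pgtable_pages() is deliberately placed in common KVM code so
that other architectures can reuse it (the message above notes that ARM
follows in a later commit). A hedged sketch of how an architecture's
stage-2 page-table allocator might pair the accounting with the page's
lifetime; the function names below are invented for illustration and are
not the actual arm64 wiring:

#include <linux/gfp.h>
#include <linux/kvm_host.h>

/* Illustrative only: account a stage-2 table page for its whole lifetime. */
static void *example_alloc_pgtable_page(gfp_t gfp)
{
	void *virt = (void *)__get_free_page(gfp);

	/* Account the page as a secondary page table as soon as it exists. */
	if (virt)
		kvm_account_pgtable_pages(virt, +1);
	return virt;
}

static void example_free_pgtable_page(void *virt)
{
	/* Unaccount before freeing so the stat never counts freed memory. */
	kvm_account_pgtable_pages(virt, -1);
	free_page((unsigned long)virt);
}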
From c59fb127583869350256656b7ed848c398bef879 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Wed, 21 Sep 2022 00:32:01 +0000
Subject: KVM: remove KVM_REQ_UNHALT

KVM_REQ_UNHALT is now unnecessary because it is replaced by the return
value of kvm_vcpu_block/kvm_vcpu_halt. Remove it.

No functional change intended.

Signed-off-by: Paolo Bonzini
Signed-off-by: Sean Christopherson
Acked-by: Marc Zyngier
Message-Id: <20220921003201.1441511-13-seanjc@google.com>
Signed-off-by: Paolo Bonzini
---
 Documentation/virt/kvm/vcpu-requests.rst | 28 +---------------------------
 arch/arm64/kvm/arm.c                     |  1 -
 arch/mips/kvm/emulate.c                  |  1 -
 arch/powerpc/kvm/book3s_pr.c             |  1 -
 arch/powerpc/kvm/book3s_pr_papr.c        |  1 -
 arch/powerpc/kvm/booke.c                 |  1 -
 arch/powerpc/kvm/powerpc.c               |  1 -
 arch/riscv/kvm/vcpu_insn.c               |  1 -
 arch/s390/kvm/kvm-s390.c                 |  2 --
 arch/x86/kvm/x86.c                       |  3 ---
 arch/x86/kvm/xen.c                       |  1 -
 include/linux/kvm_host.h                 |  3 +--
 virt/kvm/kvm_main.c                      |  4 +---
 13 files changed, 3 insertions(+), 45 deletions(-)

(limited to 'include/linux')

diff --git a/Documentation/virt/kvm/vcpu-requests.rst b/Documentation/virt/kvm/vcpu-requests.rst
index 31f62b64e07b..87f04c1fa53d 100644
--- a/Documentation/virt/kvm/vcpu-requests.rst
+++ b/Documentation/virt/kvm/vcpu-requests.rst
@@ -97,7 +97,7 @@ VCPU requests are simply bit indices of the ``vcpu->requests`` bitmap.
 This means general bitops, like those documented in [atomic-ops]_ could
 also be used, e.g. ::
 
-  clear_bit(KVM_REQ_UNHALT & KVM_REQUEST_MASK, &vcpu->requests);
+  clear_bit(KVM_REQ_UNBLOCK & KVM_REQUEST_MASK, &vcpu->requests);
 
 However, VCPU request users should refrain from doing so, as it would
 break the abstraction.  The first 8 bits are reserved for architecture
@@ -126,17 +126,6 @@ KVM_REQ_UNBLOCK
   or in order to update the interrupt routing and ensure that assigned
   devices will wake up the vCPU.
 
-KVM_REQ_UNHALT
-
-  This request may be made from the KVM common function kvm_vcpu_block(),
-  which is used to emulate an instruction that causes a CPU to halt until
-  one of an architectural specific set of events and/or interrupts is
-  received (determined by checking kvm_arch_vcpu_runnable()).  When that
-  event or interrupt arrives kvm_vcpu_block() makes the request.  This is
-  in contrast to when kvm_vcpu_block() returns due to any other reason,
-  such as a pending signal, which does not indicate the VCPU's halt
-  emulation should stop, and therefore does not make the request.
-
 KVM_REQ_OUTSIDE_GUEST_MODE
 
   This "request" ensures the target vCPU has exited guest mode prior to the
@@ -297,21 +286,6 @@ architecture dependent.  kvm_vcpu_block() calls kvm_arch_vcpu_runnable()
 to check if it should awaken.  One reason to do so is to provide
 architectures a function where requests may be checked if necessary.
 
-Clearing Requests
------------------
-
-Generally it only makes sense for the receiving VCPU thread to clear a
-request.  However, in some circumstances, such as when the requesting
-thread and the receiving VCPU thread are executed serially, such as when
-they are the same thread, or when they are using some form of concurrency
-control to temporarily execute synchronously, then it's possible to know
-that the request may be cleared immediately, rather than waiting for the
-receiving VCPU thread to handle the request in VCPU RUN.  The only current
-examples of this are kvm_vcpu_block() calls made by VCPUs to block
-themselves.  A possible side-effect of that call is to make the
-KVM_REQ_UNHALT request, which may then be cleared immediately when the
-VCPU returns from the call.
-
 References
 ==========
 
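The documentation above warns against raw bitops on vcpu->requests; the
supported interface is the kvm_make_request()/kvm_check_request() family.
A brief sketch of the canonical producer/consumer pairing follows; the two
wrapper functions are invented for illustration, while kvm_make_request(),
kvm_vcpu_kick(), kvm_request_pending() and kvm_check_request() are the
real common-KVM helpers:

/* Requesting side: set the request bit, then kick the vCPU out of guest mode. */
static void example_request_unblock(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);
}

/* Receiving side, in the vCPU run loop: test-and-clear, then handle. */
static void example_service_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return;

	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu)) {
		/* Re-evaluate blocking and wake conditions here. */
	}
}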
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 2ff0ef62abad..4f949b64fdc9 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -666,7 +666,6 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
 
 	kvm_vcpu_halt(vcpu);
 	vcpu_clear_flag(vcpu, IN_WFIT);
-	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 
 	preempt_disable();
 	vgic_v4_load(vcpu);
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 1d7c56defe93..edaec93a1a1f 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -958,7 +958,6 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 		 * We are runnable, then definitely go off to user space to
 		 * check if any I/O interrupts are pending.
 		 */
-		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		if (kvm_arch_vcpu_runnable(vcpu))
 			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 	}
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index d6abed6e51e6..9fc4dd8f66eb 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -499,7 +499,6 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 	if (msr & MSR_POW) {
 		if (!vcpu->arch.pending_exceptions) {
 			kvm_vcpu_halt(vcpu);
-			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 			vcpu->stat.generic.halt_wakeup++;
 
 			/* Unset POW bit after we woke up */
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index a1f2978b2a86..b2c89e850d7a 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -393,7 +393,6 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
 	case H_CEDE:
 		kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
 		kvm_vcpu_halt(vcpu);
-		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		vcpu->stat.generic.halt_wakeup++;
 		return EMULATE_DONE;
 	case H_LOGICAL_CI_LOAD:
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 06c5830a93f9..7b4920e9fd26 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -719,7 +719,6 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.shared->msr & MSR_WE) {
 		local_irq_enable();
 		kvm_vcpu_halt(vcpu);
-		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		hard_irq_disable();
 
 		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index fb1490761c87..ec9c1e3c2ff4 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -239,7 +239,6 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 	case EV_HCALL_TOKEN(EV_IDLE):
 		r = EV_SUCCESS;
 		kvm_vcpu_halt(vcpu);
-		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		break;
 	default:
 		r = EV_UNIMPLEMENTED;
diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c
index 7eb90a47b571..0bb52761a3f7 100644
--- a/arch/riscv/kvm/vcpu_insn.c
+++ b/arch/riscv/kvm/vcpu_insn.c
@@ -191,7 +191,6 @@ void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
 		kvm_vcpu_srcu_read_unlock(vcpu);
 		kvm_vcpu_halt(vcpu);
 		kvm_vcpu_srcu_read_lock(vcpu);
-		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 	}
 }
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index edfd4bbd0cba..aa39ea4582bd 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4343,8 +4343,6 @@ retry:
 		goto retry;
 	}
 
-	/* nothing to do, just clear the request */
-	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 	/* we left the vsie handler, nothing to do, just clear the request */
 	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 20c56be1fcf1..eb9d2c23fb04 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10813,8 +10813,6 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
 	if (hv_timer)
 		kvm_lapic_switch_to_hv_timer(vcpu);
 
-	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
-
 	/*
 	 * If the vCPU is not runnable, a signal or another host event
 	 * of some kind is pending; service it without changing the
@@ -11034,7 +11032,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 			r = 0;
 			goto out;
 		}
-		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		r = -EAGAIN;
 		if (signal_pending(current)) {
 			r = -EINTR;
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 280cb5dc7341..93c628d3e3a9 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -1065,7 +1065,6 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
 			del_timer(&vcpu->arch.xen.poll_timer);
 			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 		}
 
 		vcpu->arch.xen.poll_evtchn = 0;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 04c7e5f2f727..32f259fa5801 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -151,12 +151,11 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQUEST_NO_ACTION      BIT(10)
 /*
  * Architecture-independent vcpu->requests bit members
- * Bits 4-7 are reserved for more arch-independent bits.
+ * Bits 3-7 are reserved for more arch-independent bits.
  */
 #define KVM_REQ_TLB_FLUSH         (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_VM_DEAD           (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_UNBLOCK           2
-#define KVM_REQ_UNHALT            3
 #define KVM_REQUEST_ARCH_BASE     8
 
 /*
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index dcf47da44844..26383e63d9dd 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3409,10 +3409,8 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
 	int ret = -EINTR;
 	int idx = srcu_read_lock(&vcpu->kvm->srcu);
 
-	if (kvm_arch_vcpu_runnable(vcpu)) {
-		kvm_make_request(KVM_REQ_UNHALT, vcpu);
+	if (kvm_arch_vcpu_runnable(vcpu))
 		goto out;
-	}
 	if (kvm_cpu_has_pending_timer(vcpu))
 		goto out;
 	if (signal_pending(current))
--
cgit v1.2.3
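For callers, the net effect of the commit above is that halt emulation no
longer leaves a side-band request to clear. A sketch of the resulting
pattern, modeled on the MIPS and x86 hunks; the wrapper function is
invented for illustration, while kvm_vcpu_halt() and
kvm_arch_vcpu_runnable() are the real helpers used by those call sites:

/*
 * Before this commit, every call site had to follow halt emulation with
 *	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 * Afterwards there is nothing to clear; a caller that cares why it woke
 * up asks the architecture directly.
 */
static void example_emulate_halt(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_halt(vcpu);

	/*
	 * Runnable again? Resume the guest. Otherwise a signal or other
	 * host event is pending and must be serviced, as x86's
	 * vcpu_block() now does.
	 */
	if (kvm_arch_vcpu_runnable(vcpu))
		return;
}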