From c04fa5831d4d89dfbc88406f4a46f9846841a560 Mon Sep 17 00:00:00 2001
From: Alexey Kardashevskiy
Date: Thu, 14 Aug 2014 15:03:07 +1000
Subject: PC, KVM, CMA: Fix regression caused by wrong get_order() use

fc95ca7284bc54953165cba76c3228bd2cdb9591 claims that there is no
functional change, but this is not true: it calls get_order() (which
takes bytes) where it should have called order_base_2(), and the
kernel stops on VM_BUG_ON().

This replaces get_order() with order_base_2() (the round-up version
of ilog2()).

Suggested-by: Paul Mackerras
Cc: Alexander Graf
Cc: Joonsoo Kim
Cc: Benjamin Herrenschmidt
Reviewed-by: Aneesh Kumar K.V
Signed-off-by: Alexey Kardashevskiy
Signed-off-by: Paolo Bonzini
---
 arch/powerpc/kvm/book3s_hv_builtin.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 329d7fdd0a6a..b9615ba5b083 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -101,7 +101,7 @@ struct kvm_rma_info *kvm_alloc_rma()
 	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
 	if (!ri)
 		return NULL;
-	page = cma_alloc(kvm_cma, kvm_rma_pages, get_order(kvm_rma_pages));
+	page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
 	if (!page)
 		goto err_out;
 	atomic_set(&ri->use_count, 1);
@@ -135,12 +135,12 @@ struct page *kvm_alloc_hpt(unsigned long nr_pages)
 {
 	unsigned long align_pages = HPT_ALIGN_PAGES;

-	VM_BUG_ON(get_order(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

 	/* Old CPUs require HPT aligned on a multiple of its size */
 	if (!cpu_has_feature(CPU_FTR_ARCH_206))
 		align_pages = nr_pages;
-	return cma_alloc(kvm_cma, nr_pages, get_order(align_pages));
+	return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
-- 
cgit v1.2.3
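
For context on the fix, below is a minimal stand-alone sketch of the two
helpers: a user-space model written for this note, not the kernel's actual
implementations (those live in <asm-generic/getorder.h> and <linux/log2.h>);
PAGE_SHIFT = 12 and the example page count are assumptions. It shows why
handing a page count such as kvm_rma_pages to get_order(), which expects a
size in bytes, yields a much smaller alignment order than order_base_2():

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: 4 KiB pages */

/* Round-up log2 for n >= 1, matching what order_base_2() computes. */
static unsigned int ceil_log2(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

/* Model of get_order(): a size in BYTES -> smallest page order covering it. */
static unsigned int model_get_order(unsigned long size_bytes)
{
	unsigned long pages =
		(size_bytes + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

	return ceil_log2(pages);
}

int main(void)
{
	unsigned long kvm_rma_pages = 1UL << 15;	/* hypothetical: 32768 pages */

	/* Buggy call: the page count is misread as a byte count. */
	printf("get_order(%lu)    = %u\n",
	       kvm_rma_pages, model_get_order(kvm_rma_pages));	/* prints 3  */
	/* Fixed call: round-up log2 of the page count itself. */
	printf("order_base_2(%lu) = %u\n",
	       kvm_rma_pages, ceil_log2(kvm_rma_pages));	/* prints 15 */
	return 0;
}

Run as shown, the first line prints order 3 (32768 misread as bytes covers
only 8 pages) while the second prints the intended order 15: the kind of
too-small order that trips the VM_BUG_ON() in the hunk above.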