From 1fd02f6605b855b4af2883f29a2abc88bdf17857 Mon Sep 17 00:00:00 2001
From: Julia Lawall
Date: Sat, 30 Apr 2022 20:56:54 +0200
Subject: powerpc: fix typos in comments

Various spelling mistakes in comments.
Detected with the help of Coccinelle.

Signed-off-by: Julia Lawall
Reviewed-by: Joel Stanley
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20220430185654.5855-1-Julia.Lawall@inria.fr
---
 arch/powerpc/kvm/book3s_64_mmu_hv.c   | 2 +-
 arch/powerpc/kvm/book3s_64_vio_hv.c   | 2 +-
 arch/powerpc/kvm/book3s_emulate.c     | 2 +-
 arch/powerpc/kvm/book3s_hv_p9_entry.c | 2 +-
 arch/powerpc/kvm/book3s_hv_uvmem.c    | 2 +-
 arch/powerpc/kvm/book3s_pr.c          | 2 +-
 arch/powerpc/kvm/book3s_xics.c        | 2 +-
 arch/powerpc/kvm/book3s_xive.c        | 6 +++---
 arch/powerpc/kvm/e500mc.c             | 2 +-
 9 files changed, 11 insertions(+), 11 deletions(-)

(limited to 'arch/powerpc/kvm')

diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 0aeb51738ca9..1137c4df726c 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -58,7 +58,7 @@ struct kvm_resize_hpt {
 	/* Possible values and their usage:
 	 *  <0 an error occurred during allocation,
 	 *  -EBUSY allocation is in the progress,
-	 *  0 allocation made successfuly.
+	 *  0 allocation made successfully.
 	 */
 	int error;
 
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index fdeda6a9cff4..fdcc7b287dd8 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -453,7 +453,7 @@ static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
 	 * we are doing this on secondary cpus and current task there
 	 * is not the hypervisor. Also this is safe against THP in the
 	 * host, because an IPI to primary thread will wait for the secondary
-	 * to exit which will agains result in the below page table walk
+	 * to exit which will again result in the below page table walk
 	 * to finish.
 	 */
 	/* an rmap lock won't make it safe. because that just ensure hash
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index fdb57be71aa6..5bbfb2eed127 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -268,7 +268,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
 
 			/*
 			 * add rules to fit in ISA specification regarding TM
-			 * state transistion in TM disable/Suspended state,
+			 * state transition in TM disable/Suspended state,
 			 * and target TM state is TM inactive(00) state. (the
 			 * change should be suppressed).
 			 */
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index a28e5b3daabd..ac38c1cad378 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -379,7 +379,7 @@ void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
 {
 	/*
 	 * current->thread.xxx registers must all be restored to host
-	 * values before a potential context switch, othrewise the context
+	 * values before a potential context switch, otherwise the context
 	 * switch itself will overwrite current->thread.xxx with the values
 	 * from the guest SPRs.
 	 */
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 45c993dd05f5..0f66f7fbfb80 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -120,7 +120,7 @@ static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);
  * content is un-encrypted.
  *
  * (c) Normal - The GFN is a normal. The GFN is associated with
- * a normal VM. The contents of the GFN is accesible to
+ * a normal VM. The contents of the GFN is accessible to
  * the Hypervisor. Its content is never encrypted.
  *
  * States of a VM.
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 7bf9e6ca5c2d..d6abed6e51e6 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1287,7 +1287,7 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 
 		/* Get last sc for papr */
 		if (vcpu->arch.papr_enabled) {
-			/* The sc instuction points SRR0 to the next inst */
+			/* The sc instruction points SRR0 to the next inst */
 			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
 			if (emul != EMULATE_DONE) {
 				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index ab6d37d78c62..589a8f257120 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -462,7 +462,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 	 * new guy. We cannot assume that the rejected interrupt is less
 	 * favored than the new one, and thus doesn't need to be delivered,
 	 * because by the time we exit icp_try_to_deliver() the target
-	 * processor may well have alrady consumed & completed it, and thus
+	 * processor may well have already consumed & completed it, and thus
 	 * the rejected interrupt might actually be already acceptable.
 	 */
 	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index c0ce5531d9bc..24d434f1f012 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -124,7 +124,7 @@ void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
 		 * interrupt might have fired and be on its way to the
 		 * host queue while we mask it, and if we unmask it
 		 * early enough (re-cede right away), there is a
-		 * theorical possibility that it fires again, thus
+		 * theoretical possibility that it fires again, thus
 		 * landing in the target queue more than once which is
 		 * a big no-no.
 		 *
@@ -622,7 +622,7 @@ static int xive_target_interrupt(struct kvm *kvm,
 
 	/*
 	 * Targetting rules: In order to avoid losing track of
-	 * pending interrupts accross mask and unmask, which would
+	 * pending interrupts across mask and unmask, which would
 	 * allow queue overflows, we implement the following rules:
 	 *
 	 * - Unless it was never enabled (or we run out of capacity)
@@ -1073,7 +1073,7 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
 	/*
 	 * If old_p is set, the interrupt is pending, we switch it to
 	 * PQ=11. This will force a resend in the host so the interrupt
-	 * isn't lost to whatver host driver may pick it up
+	 * isn't lost to whatever host driver may pick it up
 	 */
 	if (state->old_p)
 		xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index fa0d8dbbe484..4ff1372e48d4 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -309,7 +309,7 @@ static int kvmppc_core_vcpu_create_e500mc(struct kvm_vcpu *vcpu)
 	BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0);
 	vcpu_e500 = to_e500(vcpu);
 
-	/* Invalid PIR value -- this LPID dosn't have valid state on any cpu */
+	/* Invalid PIR value -- this LPID doesn't have valid state on any cpu */
 	vcpu->arch.oldpir = 0xffffffff;
 
 	err = kvmppc_e500_tlb_init(vcpu_e500);
--
cgit v1.2.3