Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/kernel/module.c | 2
-rw-r--r--  arch/alpha/kernel/signal.c | 2
-rw-r--r--  arch/alpha/kernel/traps.c | 4
-rw-r--r--  arch/arc/kernel/disasm.c | 2
-rw-r--r--  arch/arc/kernel/signal.c | 2
-rw-r--r--  arch/arc/kernel/unwind.c | 6
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 10
-rw-r--r--  arch/arm/kernel/signal.c | 2
-rw-r--r--  arch/arm/mach-ep93xx/crunch.c | 2
-rw-r--r--  arch/arm/mach-mmp/pm-mmp2.c | 8
-rw-r--r--  arch/arm/mach-mmp/pm-pxa910.c | 10
-rw-r--r--  arch/arm/mach-omap2/id.c | 8
-rw-r--r--  arch/arm/mach-omap2/omap_device.c | 2
-rw-r--r--  arch/arm/mach-orion5x/dns323-setup.c | 2
-rw-r--r--  arch/arm/mach-rpc/riscpc.c | 2
-rw-r--r--  arch/arm/mach-tegra/reset.c | 2
-rw-r--r--  arch/arm/mm/alignment.c | 4
-rw-r--r--  arch/arm/mm/proc-v7-bugs.c | 2
-rw-r--r--  arch/arm/plat-omap/dma.c | 6
-rw-r--r--  arch/arm/probes/decode.c | 2
-rw-r--r--  arch/arm/probes/kprobes/core.c | 2
-rw-r--r--  arch/arm64/Makefile | 14
-rw-r--r--  arch/arm64/include/asm/compiler.h | 6
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 3
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 43
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 6
-rw-r--r--  arch/arm64/kernel/acpi.c | 2
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 2
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 1
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 6
-rw-r--r--  arch/arm64/kernel/image-vars.h | 4
-rw-r--r--  arch/arm64/kernel/module.c | 8
-rw-r--r--  arch/arm64/kernel/smp.c | 2
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 8
-rw-r--r--  arch/arm64/kvm/handle_exit.c | 2
-rw-r--r--  arch/arm64/kvm/hyp/entry.S | 15
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S | 65
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/debug-sr.h | 60
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h | 39
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/switch.c | 5
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v3-sr.c | 16
-rw-r--r--  arch/arm64/kvm/hyp/vhe/switch.c | 5
-rw-r--r--  arch/arm64/mm/context.c | 2
-rw-r--r--  arch/c6x/kernel/signal.c | 4
-rw-r--r--  arch/csky/kernel/signal.c | 2
-rw-r--r--  arch/h8300/kernel/signal.c | 2
-rw-r--r--  arch/hexagon/kernel/module.c | 2
-rw-r--r--  arch/hexagon/kernel/signal.c | 2
-rw-r--r--  arch/ia64/kernel/crash.c | 2
-rw-r--r--  arch/ia64/kernel/module.c | 2
-rw-r--r--  arch/ia64/kernel/perfmon.c | 2
-rw-r--r--  arch/ia64/kernel/signal.c | 2
-rw-r--r--  arch/ia64/kernel/unaligned.c | 6
-rw-r--r--  arch/ia64/kernel/unwind.c | 2
-rw-r--r--  arch/m68k/atari/atakeyb.c | 2
-rw-r--r--  arch/m68k/kernel/signal.c | 2
-rw-r--r--  arch/m68k/mac/config.c | 2
-rw-r--r--  arch/m68k/mac/via.c | 2
-rw-r--r--  arch/m68k/mm/fault.c | 2
-rw-r--r--  arch/microblaze/kernel/signal.c | 2
-rw-r--r--  arch/mips/include/asm/unroll.h | 64
-rw-r--r--  arch/nds32/kernel/fpu.c | 12
-rw-r--r--  arch/nds32/kernel/signal.c | 4
-rw-r--r--  arch/openrisc/kernel/signal.c | 2
-rw-r--r--  arch/parisc/kernel/signal.c | 2
-rw-r--r--  arch/parisc/kernel/traps.c | 11
-rw-r--r--  arch/parisc/mm/fault.c | 4
-rw-r--r--  arch/powerpc/Kconfig | 12
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h | 8
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 10
-rw-r--r--  arch/powerpc/include/asm/mman.h | 31
-rw-r--r--  arch/powerpc/include/asm/nohash/64/pgtable.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/mman.h | 2
-rw-r--r--  arch/powerpc/kernel/dt_cpu_ftrs.c | 2
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 4
-rw-r--r--  arch/powerpc/kernel/process.c | 12
-rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c | 2
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c | 2
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 19
-rw-r--r--  arch/powerpc/perf/imc-pmu.c | 4
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 2
-rw-r--r--  arch/powerpc/platforms/powernv/idle.c | 2
-rw-r--r--  arch/riscv/kernel/signal.c | 2
-rw-r--r--  arch/riscv/net/bpf_jit_comp32.c | 4
-rw-r--r--  arch/s390/include/asm/percpu.h | 28
-rw-r--r--  arch/s390/mm/vmem.c | 1
-rw-r--r--  arch/sh/drivers/platform_early.c | 2
-rw-r--r--  arch/sh/kernel/disassemble.c | 4
-rw-r--r--  arch/sh/kernel/kgdb.c | 2
-rw-r--r--  arch/sh/kernel/signal_32.c | 2
-rw-r--r--  arch/sparc/kernel/auxio_64.c | 1
-rw-r--r--  arch/sparc/kernel/central.c | 2
-rw-r--r--  arch/sparc/kernel/kgdb_32.c | 2
-rw-r--r--  arch/sparc/kernel/kgdb_64.c | 2
-rw-r--r--  arch/sparc/kernel/pcr.c | 2
-rw-r--r--  arch/sparc/kernel/prom_32.c | 2
-rw-r--r--  arch/sparc/kernel/signal32.c | 4
-rw-r--r--  arch/sparc/kernel/signal_32.c | 4
-rw-r--r--  arch/sparc/kernel/signal_64.c | 4
-rw-r--r--  arch/sparc/math-emu/math_32.c | 8
-rw-r--r--  arch/sparc/net/bpf_jit_comp_32.c | 2
-rw-r--r--  arch/um/kernel/signal.c | 2
-rw-r--r--  arch/x86/boot/cmdline.c | 4
-rw-r--r--  arch/x86/boot/compressed/kaslr.c | 2
-rw-r--r--  arch/x86/events/intel/core.c | 6
-rw-r--r--  arch/x86/events/intel/lbr.c | 2
-rw-r--r--  arch/x86/kernel/alternative.c | 2
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 4
-rw-r--r--  arch/x86/kernel/apic/probe_32.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cacheinfo.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mce/inject.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mce/intel.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cyrix.c | 2
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c | 2
-rw-r--r--  arch/x86/kernel/kgdb.c | 4
-rw-r--r--  arch/x86/kernel/mpparse.c | 4
-rw-r--r--  arch/x86/kernel/ptrace.c | 2
-rw-r--r--  arch/x86/kernel/reboot.c | 2
-rw-r--r--  arch/x86/kernel/signal.c | 2
-rw-r--r--  arch/x86/kernel/uprobes.c | 4
-rw-r--r--  arch/x86/kvm/emulate.c | 2
-rw-r--r--  arch/x86/kvm/hyperv.c | 2
-rw-r--r--  arch/x86/kvm/irq_comm.c | 2
-rw-r--r--  arch/x86/kvm/lapic.c | 6
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 2
-rw-r--r--  arch/x86/kvm/svm/svm.c | 2
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 12
-rw-r--r--  arch/x86/kvm/x86.c | 11
-rw-r--r--  arch/x86/lib/cmdline.c | 8
-rw-r--r--  arch/x86/lib/insn-eval.c | 6
-rw-r--r--  arch/x86/math-emu/errors.c | 2
-rw-r--r--  arch/x86/math-emu/fpu_trig.c | 2
-rw-r--r--  arch/x86/mm/ioremap.c | 2
-rw-r--r--  arch/xtensa/kernel/signal.c | 2
134 files changed, 506 insertions, 334 deletions
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index ac110ae8f978..5b60c248de9e 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -212,7 +212,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
STO_ALPHA_STD_GPLOAD)
/* Omit the prologue. */
value += 8;
- /* FALLTHRU */
+ fallthrough;
case R_ALPHA_BRADDR:
value -= (u64)location + 4;
if (value & 3)
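
For reference, the substitution applied throughout this series replaces free-form /* fall through */ comments with the fallthrough; pseudo-keyword, which the compiler can check under -Wimplicit-fallthrough. A minimal sketch of the pattern follows; the real definition lives in include/linux/compiler_attributes.h and is only paraphrased here, and classify() is an invented example rather than code from this patch.

/* Paraphrased from include/linux/compiler_attributes.h */
#if defined(__has_attribute) && __has_attribute(__fallthrough__)
# define fallthrough    __attribute__((__fallthrough__))
#else
# define fallthrough    do {} while (0)    /* fallthrough */
#endif

/* Hypothetical example of the annotated pattern. */
static int classify(int op)
{
        int flags = 0;

        switch (op) {
        case 2:
                flags |= 1;     /* op 2 does extra setup ...           */
                fallthrough;    /* ... then deliberately shares case 1 */
        case 1:
                flags |= 2;
                break;
        default:
                flags = -1;     /* no annotation needed before a break */
                break;
        }
        return flags;
}
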
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index a813020d2f11..15bc9d1e79f4 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -453,7 +453,7 @@ syscall_restart(unsigned long r0, unsigned long r19,
regs->r0 = EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->r0 = r0; /* reset v0 and a3 and replay syscall */
regs->r19 = r19;
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 49754e07e04f..921d4b6e4d95 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -883,7 +883,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
case 0x26: /* sts */
fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
- /* FALLTHRU */
+ fallthrough;
case 0x2c: /* stl */
__asm__ __volatile__(
@@ -911,7 +911,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
case 0x27: /* stt */
fake_reg = alpha_read_fp_reg(reg);
- /* FALLTHRU */
+ fallthrough;
case 0x2d: /* stq */
__asm__ __volatile__(
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
index d04837d91b40..03f8b1be0c3a 100644
--- a/arch/arc/kernel/disasm.c
+++ b/arch/arc/kernel/disasm.c
@@ -339,7 +339,7 @@ void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
case op_LDWX_S: /* LDWX_S c, [b, u6] */
state->x = 1;
- /* intentional fall-through */
+ fallthrough;
case op_LDW_S: /* LDW_S c, [b, u6] */
state->zz = 2;
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 3d57ed0d8535..8222f8c54690 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -321,7 +321,7 @@ static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs)
regs->r0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
/*
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index f87758a6851b..74ad4256022e 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -572,7 +572,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end,
#else
BUILD_BUG_ON(sizeof(u32) != sizeof(value));
#endif
- /* Fall through */
+ fallthrough;
case DW_EH_PE_native:
if (end < (const void *)(ptr.pul + 1))
return 0;
@@ -827,7 +827,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
case DW_CFA_def_cfa:
state->cfa.reg = get_uleb128(&ptr.p8, end);
unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
- /* fall through */
+ fallthrough;
case DW_CFA_def_cfa_offset:
state->cfa.offs = get_uleb128(&ptr.p8, end);
unw_debug("cfa_def_cfa_offset: 0x%lx ",
@@ -835,7 +835,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
break;
case DW_CFA_def_cfa_sf:
state->cfa.reg = get_uleb128(&ptr.p8, end);
- /* fall through */
+ fallthrough;
case DW_CFA_def_cfa_offset_sf:
state->cfa.offs = get_sleb128(&ptr.p8, end)
* state->dataAlign;
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 7fff88e61252..7a4853b1213a 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -547,7 +547,7 @@ static int arch_build_bp_info(struct perf_event *bp,
if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
&& max_watchpoint_len >= 8)
break;
- /* Else, fall through */
+ fallthrough;
default:
return -EINVAL;
}
@@ -612,12 +612,12 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
/* Allow halfword watchpoints and breakpoints. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
- /* Else, fall through */
+ fallthrough;
case 3:
/* Allow single byte watchpoint. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
- /* Else, fall through */
+ fallthrough;
default:
ret = -EINVAL;
goto out;
@@ -884,7 +884,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
break;
case ARM_ENTRY_ASYNC_WATCHPOINT:
WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
- /* Fall through */
+ fallthrough;
case ARM_ENTRY_SYNC_WATCHPOINT:
watchpoint_handler(addr, fsr, regs);
break;
@@ -933,7 +933,7 @@ static bool core_has_os_save_restore(void)
ARM_DBG_READ(c1, c1, 4, oslsr);
if (oslsr & ARM_OSLSR_OSLM0)
return true;
- /* Else, fall through */
+ fallthrough;
default:
return false;
}
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index c9dc912b83f0..c1892f733f20 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -596,7 +596,7 @@ static int do_signal(struct pt_regs *regs, int syscall)
switch (retval) {
case -ERESTART_RESTARTBLOCK:
restart -= 2;
- /* Fall through */
+ fallthrough;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
diff --git a/arch/arm/mach-ep93xx/crunch.c b/arch/arm/mach-ep93xx/crunch.c
index 1c05c5bf7e5c..757032d82f63 100644
--- a/arch/arm/mach-ep93xx/crunch.c
+++ b/arch/arm/mach-ep93xx/crunch.c
@@ -49,7 +49,7 @@ static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
* FALLTHROUGH: Ensure we don't try to overwrite our newly
* initialised state information on the first fault.
*/
- /* Fall through */
+ fallthrough;
case THREAD_NOTIFY_EXIT:
crunch_task_release(thread);
diff --git a/arch/arm/mach-mmp/pm-mmp2.c b/arch/arm/mach-mmp/pm-mmp2.c
index 2d86381e152d..7a6f74c32d42 100644
--- a/arch/arm/mach-mmp/pm-mmp2.c
+++ b/arch/arm/mach-mmp/pm-mmp2.c
@@ -123,19 +123,19 @@ void mmp2_pm_enter_lowpower_mode(int state)
case POWER_MODE_SYS_SLEEP:
apcr |= MPMU_PCR_PJ_SLPEN; /* set the SLPEN bit */
apcr |= MPMU_PCR_PJ_VCTCXOSD; /* set VCTCXOSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_CHIP_SLEEP:
apcr |= MPMU_PCR_PJ_SLPEN;
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_SLEEP:
apcr |= MPMU_PCR_PJ_APBSD; /* set APBSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_IDLE:
apcr |= MPMU_PCR_PJ_AXISD; /* set AXISDD bit */
apcr |= MPMU_PCR_PJ_DDRCORSD; /* set DDRCORSD bit */
idle_cfg |= APMU_PJ_IDLE_CFG_PJ_PWRDWN; /* PJ power down */
apcr |= MPMU_PCR_PJ_SPSD;
- /* fall through */
+ fallthrough;
case POWER_MODE_CORE_EXTIDLE:
idle_cfg |= APMU_PJ_IDLE_CFG_PJ_IDLE; /* set the IDLE bit */
idle_cfg &= ~APMU_PJ_IDLE_CFG_ISO_MODE_CNTRL_MASK;
diff --git a/arch/arm/mach-mmp/pm-pxa910.c b/arch/arm/mach-mmp/pm-pxa910.c
index 69ebe18ff209..1d71d73c1862 100644
--- a/arch/arm/mach-mmp/pm-pxa910.c
+++ b/arch/arm/mach-mmp/pm-pxa910.c
@@ -145,23 +145,23 @@ void pxa910_pm_enter_lowpower_mode(int state)
case POWER_MODE_UDR:
/* only shutdown APB in UDR */
apcr |= MPMU_APCR_STBYEN | MPMU_APCR_APBSD;
- /* fall through */
+ fallthrough;
case POWER_MODE_SYS_SLEEP:
apcr |= MPMU_APCR_SLPEN; /* set the SLPEN bit */
apcr |= MPMU_APCR_VCTCXOSD; /* set VCTCXOSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_SLEEP:
apcr |= MPMU_APCR_DDRCORSD; /* set DDRCORSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_IDLE:
apcr |= MPMU_APCR_AXISD; /* set AXISDD bit */
- /* fall through */
+ fallthrough;
case POWER_MODE_CORE_EXTIDLE:
idle_cfg |= APMU_MOH_IDLE_CFG_MOH_IDLE;
idle_cfg |= APMU_MOH_IDLE_CFG_MOH_PWRDWN;
idle_cfg |= APMU_MOH_IDLE_CFG_MOH_PWR_SW(3)
| APMU_MOH_IDLE_CFG_MOH_L2_PWR_SW(3);
- /* fall through */
+ fallthrough;
case POWER_MODE_CORE_INTIDLE:
break;
}
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 1d119b974f5f..59755b5a1ad7 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -396,7 +396,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "3.1";
break;
case 7:
- /* FALLTHROUGH */
default:
/* Use the latest known revision as default */
omap_revision = OMAP3430_REV_ES3_1_2;
@@ -416,7 +415,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "1.0";
break;
case 1:
- /* FALLTHROUGH */
default:
omap_revision = AM35XX_REV_ES1_1;
cpu_rev = "1.1";
@@ -435,7 +433,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "1.1";
break;
case 2:
- /* FALLTHROUGH */
default:
omap_revision = OMAP3630_REV_ES1_2;
cpu_rev = "1.2";
@@ -456,7 +453,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "2.0";
break;
case 3:
- /* FALLTHROUGH */
default:
omap_revision = TI8168_REV_ES2_1;
cpu_rev = "2.1";
@@ -473,7 +469,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "2.0";
break;
case 2:
- /* FALLTHROUGH */
default:
omap_revision = AM335X_REV_ES2_1;
cpu_rev = "2.1";
@@ -491,7 +486,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "1.1";
break;
case 2:
- /* FALLTHROUGH */
default:
omap_revision = AM437X_REV_ES1_2;
cpu_rev = "1.2";
@@ -502,7 +496,6 @@ void __init omap3xxx_check_revision(void)
case 0xb968:
switch (rev) {
case 0:
- /* FALLTHROUGH */
case 1:
omap_revision = TI8148_REV_ES1_0;
cpu_rev = "1.0";
@@ -512,7 +505,6 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "2.0";
break;
case 3:
- /* FALLTHROUGH */
default:
omap_revision = TI8148_REV_ES2_1;
cpu_rev = "2.1";
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 6b4548f3b57f..fc7bb2ca1672 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -240,7 +240,7 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
if (pdev->dev.of_node)
omap_device_build_from_dt(pdev);
omap_auxdata_legacy_init(dev);
- /* fall through */
+ fallthrough;
default:
od = to_omap_device(pdev);
if (od)
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index d13344b2ddcd..87cb47220e82 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -624,7 +624,7 @@ static void __init dns323_init(void)
dns323ab_leds[0].active_low = 1;
gpio_request(DNS323_GPIO_LED_POWER1, "Power Led Enable");
gpio_direction_output(DNS323_GPIO_LED_POWER1, 0);
- /* Fall through */
+ fallthrough;
case DNS323_REV_B1:
i2c_register_board_info(0, dns323ab_i2c_devices,
ARRAY_SIZE(dns323ab_i2c_devices));
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index ea2c84214bac..d23970bd638d 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -46,7 +46,7 @@ static int __init parse_tag_acorn(const struct tag *tag)
switch (tag->u.acorn.vram_pages) {
case 512:
vram_size += PAGE_SIZE * 256;
- /* Fall through - ??? */
+ fallthrough; /* ??? */
case 256:
vram_size += PAGE_SIZE * 256;
default:
diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c
index 76a65df42d10..d5c805adf7a8 100644
--- a/arch/arm/mach-tegra/reset.c
+++ b/arch/arm/mach-tegra/reset.c
@@ -70,7 +70,7 @@ static void __init tegra_cpu_reset_handler_enable(void)
switch (err) {
case -ENOSYS:
tegra_cpu_reset_handler_set(reset_address);
- /* fall through */
+ fallthrough;
case 0:
is_enabled = true;
break;
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index f4bfc1cac91a..ea81e89e7740 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -694,7 +694,7 @@ thumb2arm(u16 tinstr)
return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] |
(tinstr & 255); /* register_list */
}
- /* Else, fall through - for illegal instruction case */
+ fallthrough; /* for illegal instruction case */
default:
return BAD_INSTR;
@@ -750,7 +750,7 @@ do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs,
case 0xe8e0:
case 0xe9e0:
poffset->un = (tinst2 & 0xff) << 2;
- /* Fall through */
+ fallthrough;
case 0xe940:
case 0xe9c0:
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index c0fbfca5da8b..114c05ab4dd9 100644
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -71,7 +71,7 @@ static void cpu_v7_spectre_init(void)
/* Other ARM CPUs require no workaround */
if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
break;
- /* fallthrough */
+ fallthrough;
/* Cortex A57/A72 require firmware workaround */
case ARM_CPU_PART_CORTEX_A57:
case ARM_CPU_PART_CORTEX_A72: {
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index b2e9e822426f..1eb59003bdec 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -309,14 +309,14 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
* not supported by current hardware on OMAP1
* w |= (0x03 << 7);
*/
- /* fall through */
+ fallthrough;
case OMAP_DMA_DATA_BURST_16:
if (dma_omap2plus()) {
burst = 0x3;
break;
}
/* OMAP1 don't support burst 16 */
- /* fall through */
+ fallthrough;
default:
BUG();
}
@@ -393,7 +393,7 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
break;
}
/* OMAP1 don't support burst 16 */
- /* fall through */
+ fallthrough;
default:
printk(KERN_ERR "Invalid DMA burst mode\n");
BUG();
diff --git a/arch/arm/probes/decode.c b/arch/arm/probes/decode.c
index fe81a9c21f2d..c84053a81358 100644
--- a/arch/arm/probes/decode.c
+++ b/arch/arm/probes/decode.c
@@ -307,7 +307,7 @@ static bool __kprobes decode_regs(probes_opcode_t *pinsn, u32 regs, bool modify)
case REG_TYPE_NOPCWB:
if (!is_writeback(insn))
break; /* No writeback, so any register is OK */
- /* fall through... */
+ fallthrough;
case REG_TYPE_NOPC:
case REG_TYPE_NOPCX:
/* Reject PC (R15) */
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index 90b5bc723c83..feefa2055eba 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -280,7 +280,7 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
/* A nested probe was hit in FIQ, it is a BUG */
pr_warn("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
- /* fall through */
+ fallthrough;
default:
/* impossible cases */
BUG();
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index b45f0124cc16..130569f90c54 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -82,8 +82,8 @@ endif
# compiler to generate them and consequently to break the single image contract
# we pass it only to the assembler. This option is utilized only in case of non
# integrated assemblers.
-ifneq ($(CONFIG_AS_HAS_ARMV8_4), y)
-branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
+ifeq ($(CONFIG_AS_HAS_PAC), y)
+asm-arch := armv8.3-a
endif
endif
@@ -91,7 +91,12 @@ KBUILD_CFLAGS += $(branch-prot-flags-y)
ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
# make sure to pass the newest target architecture to -march.
-KBUILD_CFLAGS += -Wa,-march=armv8.4-a
+asm-arch := armv8.4-a
+endif
+
+ifdef asm-arch
+KBUILD_CFLAGS += -Wa,-march=$(asm-arch) \
+ -DARM64_ASM_ARCH='"$(asm-arch)"'
endif
ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
@@ -165,7 +170,8 @@ zinstall install:
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
- $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@
+ $(if $(CONFIG_COMPAT_VDSO), \
+ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index 51a7ce87cdfe..6fb2e6bcc392 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -2,6 +2,12 @@
#ifndef __ASM_COMPILER_H
#define __ASM_COMPILER_H
+#ifdef ARM64_ASM_ARCH
+#define ARM64_ASM_PREAMBLE ".arch " ARM64_ASM_ARCH "\n"
+#else
+#define ARM64_ASM_PREAMBLE
+#endif
+
/*
* The EL0/EL1 pointer bits used by a pointer authentication code.
* This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 51c1d9918999..1da8e3dc4455 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -71,11 +71,12 @@
* IMO: Override CPSR.I and enable signaling with VI
* FMO: Override CPSR.F and enable signaling with VF
* SWIO: Turn set/way invalidates into set/way clean+invalidate
+ * PTW: Take a stage2 fault if a stage1 walk steps in device memory
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
HCR_BSU_IS | HCR_FB | HCR_TAC | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
- HCR_FMO | HCR_IMO)
+ HCR_FMO | HCR_IMO | HCR_PTW )
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index fb1a922b31ba..6f98fbd0ac81 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -169,6 +169,34 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
*__hyp_this_cpu_ptr(sym); \
})
+#define __KVM_EXTABLE(from, to) \
+ " .pushsection __kvm_ex_table, \"a\"\n" \
+ " .align 3\n" \
+ " .long (" #from " - .), (" #to " - .)\n" \
+ " .popsection\n"
+
+
+#define __kvm_at(at_op, addr) \
+( { \
+ int __kvm_at_err = 0; \
+ u64 spsr, elr; \
+ asm volatile( \
+ " mrs %1, spsr_el2\n" \
+ " mrs %2, elr_el2\n" \
+ "1: at "at_op", %3\n" \
+ " isb\n" \
+ " b 9f\n" \
+ "2: msr spsr_el2, %1\n" \
+ " msr elr_el2, %2\n" \
+ " mov %w0, %4\n" \
+ "9:\n" \
+ __KVM_EXTABLE(1b, 2b) \
+ : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \
+ : "r" (addr), "i" (-EFAULT)); \
+ __kvm_at_err; \
+} )
+
+
#else /* __ASSEMBLY__ */
.macro hyp_adr_this_cpu reg, sym, tmp
@@ -193,6 +221,21 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm
+/*
+ * KVM extable for unexpected exceptions.
+ * In the same format as _asm_extable, but output to a different section so that
+ * it can be mapped to EL2. The KVM version is not sorted. The caller must
+ * ensure:
+ * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
+ * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
+ */
+.macro _kvm_extable, from, to
+ .pushsection __kvm_ex_table, "a"
+ .align 3
+ .long (\from - .), (\to - .)
+ .popsection
+.endm
+
#endif
#endif /* __ARM_KVM_ASM_H__ */
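
A hedged sketch of how the new __kvm_at() helper is consumed; it mirrors the __translate_far_to_hpfar() hunk further down in this diff. The macro evaluates to 0 when the AT instruction completed and to -EFAULT when the __kvm_ex_table fixup had to run. try_translate() is an illustrative name (not added by the patch) and the sketch assumes the usual kernel headers (asm/sysreg.h for read_sysreg() and SYS_PAR_EL1_F).

/* Illustrative only: translate an address at EL2 without dying if the
 * AT instruction itself takes an unexpected exception.
 */
static u64 try_translate(u64 far)
{
        if (__kvm_at("s1e1r", far))
                return SYS_PAR_EL1_F;   /* AT faulted, fixup ran: report failure */

        return read_sysreg(par_el1);    /* AT completed, PAR_EL1 holds the result */
}
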
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index d493174415db..cc3f5a33ff9c 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -28,14 +28,16 @@
* not. The macro handles invoking the asm with or without the
* register argument as appropriate.
*/
-#define __TLBI_0(op, arg) asm ("tlbi " #op "\n" \
+#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE \
+ "tlbi " #op "\n" \
ALTERNATIVE("nop\n nop", \
"dsb ish\n tlbi " #op, \
ARM64_WORKAROUND_REPEAT_TLBI, \
CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
: : )
-#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n" \
+#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE \
+ "tlbi " #op ", %0\n" \
ALTERNATIVE("nop\n nop", \
"dsb ish\n tlbi " #op ", %0", \
ARM64_WORKAROUND_REPEAT_TLBI, \
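
For context: ARM64_ASM_PREAMBLE (added to asm/compiler.h above) expands to an .arch directive built from the ARM64_ASM_ARCH string that the Makefile now passes via -DARM64_ASM_ARCH, so inline asm such as these __TLBI_* macros is assembled against the newest supported architecture even when -Wa,-march cannot be handed to an integrated assembler. A hedged, self-contained illustration follows; the local #defines stand in for what the build system normally provides, and the TLBI op is only an example.

#define ARM64_ASM_ARCH     "armv8.4-a"                 /* normally from the Makefile */
#define ARM64_ASM_PREAMBLE ".arch " ARM64_ASM_ARCH "\n"

static inline void demo_flush(unsigned long arg)
{
        /* Emits: .arch armv8.4-a ; tlbi vale1is, x<n>
         * The directive is what lets newer encodings (e.g. the ARMv8.4
         * range TLBIs) assemble inside inline asm.
         */
        asm volatile(ARM64_ASM_PREAMBLE
                     "tlbi vale1is, %0\n"
                     : : "r" (arg) : "memory");
}
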
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 455966401102..a85174d05473 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -322,7 +322,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
*/
if (memblock_is_map_memory(phys))
return (void __iomem *)__phys_to_virt(phys);
- /* fall through */
+ fallthrough;
default:
if (region->attribute & EFI_MEMORY_WB)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a389b999482e..6424584be01e 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -686,7 +686,7 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
case FTR_HIGHER_OR_ZERO_SAFE:
if (!cur || !new)
break;
- /* Fallthrough */
+ fallthrough;
case FTR_HIGHER_SAFE:
ret = new > cur ? new : cur;
break;
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 393c6fb1f1cb..d0076c2159e6 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -327,7 +327,6 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
set_bit(ICACHEF_VPIPT, &__icache_flags);
break;
default:
- /* Fallthrough */
case ICACHE_POLICY_VIPT:
/* Assume aliasing */
set_bit(ICACHEF_ALIASING, &__icache_flags);
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index af234a1e08b7..712e97c03e54 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -257,7 +257,7 @@ static int hw_breakpoint_control(struct perf_event *bp,
* level.
*/
enable_debug_monitors(dbg_el);
- /* Fall through */
+ fallthrough;
case HW_BREAKPOINT_RESTORE:
/* Setup the address register. */
write_wb_reg(val_reg, i, info->address);
@@ -541,13 +541,13 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
- /* Fallthrough */
+ fallthrough;
case 3:
/* Allow single byte watchpoint. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
- /* Fallthrough */
+ fallthrough;
default:
return -EINVAL;
}
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 9e897c500237..8982b68289b7 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -103,6 +103,10 @@ KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
KVM_NVHE_ALIAS(gic_pmr_sync);
#endif
+/* EL2 exception handling */
+KVM_NVHE_ALIAS(__start___kvm_ex_table);
+KVM_NVHE_ALIAS(__stop___kvm_ex_table);
+
#endif /* CONFIG_KVM */
#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 1cd1a4d0ed30..2a1ad95d9b2c 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -315,21 +315,21 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
/* MOVW instruction relocations. */
case R_AARCH64_MOVW_UABS_G0_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
AARCH64_INSN_IMM_MOVKZ);
@@ -397,7 +397,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_adrp(me, sechdrs, loc, val);
if (ovf && ovf != -ERANGE)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 03957a1ae6c0..355ee9eed4dd 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -151,7 +151,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
break;
}
pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
- /* Fall through */
+ fallthrough;
case CPU_STUCK_IN_KERNEL:
pr_crit("CPU%u: is stuck in kernel\n", cpu);
if (status & CPU_STUCK_REASON_52_BIT_VA)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index ec8e894684a7..7cba7623fcec 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -20,6 +20,13 @@ ENTRY(_text)
jiffies = jiffies_64;
+
+#define HYPERVISOR_EXTABLE \
+ . = ALIGN(SZ_8); \
+ __start___kvm_ex_table = .; \
+ *(__kvm_ex_table) \
+ __stop___kvm_ex_table = .;
+
#define HYPERVISOR_TEXT \
/* \
* Align to 4 KB so that \
@@ -35,6 +42,7 @@ jiffies = jiffies_64;
__hyp_idmap_text_end = .; \
__hyp_text_start = .; \
*(.hyp.text) \
+ HYPERVISOR_EXTABLE \
__hyp_text_end = .;
#define IDMAP_TEXT \
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index fe6c7d79309d..5d690d60ccad 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -128,7 +128,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_WATCHPT_LOW:
run->debug.arch.far = vcpu->arch.fault.far_el2;
- /* fall through */
+ fallthrough;
case ESR_ELx_EC_SOFTSTP_LOW:
case ESR_ELx_EC_BREAKPT_LOW:
case ESR_ELx_EC_BKPT32:
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index ee32a7743389..76e7eaf4675e 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -196,20 +196,23 @@ alternative_endif
// This is our single instruction exception window. A pending
// SError is guaranteed to occur at the earliest when we unmask
// it, and at the latest just after the ISB.
- .global abort_guest_exit_start
abort_guest_exit_start:
isb
- .global abort_guest_exit_end
abort_guest_exit_end:
msr daifset, #4 // Mask aborts
+ ret
+
+ _kvm_extable abort_guest_exit_start, 9997f
+ _kvm_extable abort_guest_exit_end, 9997f
+9997:
+ msr daifset, #4 // Mask aborts
+ mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
- // If the exception took place, restore the EL1 exception
- // context so that we can report some information.
- // Merge the exception code with the SError pending bit.
- tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+ // restore the EL1 exception context so that we can report some
+ // information. Merge the exception code with the SError pending bit.
msr elr_el2, x2
msr esr_el2, x3
msr spsr_el2, x4
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 689fccbc9de7..46b4dab933d0 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,30 @@
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>
+.macro save_caller_saved_regs_vect
+ /* x0 and x1 were saved in the vector entry */
+ stp x2, x3, [sp, #-16]!
+ stp x4, x5, [sp, #-16]!
+ stp x6, x7, [sp, #-16]!
+ stp x8, x9, [sp, #-16]!
+ stp x10, x11, [sp, #-16]!
+ stp x12, x13, [sp, #-16]!
+ stp x14, x15, [sp, #-16]!
+ stp x16, x17, [sp, #-16]!
+.endm
+
+.macro restore_caller_saved_regs_vect
+ ldp x16, x17, [sp], #16
+ ldp x14, x15, [sp], #16
+ ldp x12, x13, [sp], #16
+ ldp x10, x11, [sp], #16
+ ldp x8, x9, [sp], #16
+ ldp x6, x7, [sp], #16
+ ldp x4, x5, [sp], #16
+ ldp x2, x3, [sp], #16
+ ldp x0, x1, [sp], #16
+.endm
+
.text
.macro do_el2_call
@@ -143,13 +167,19 @@ el1_error:
b __guest_exit
el2_sync:
- /* Check for illegal exception return, otherwise panic */
+ /* Check for illegal exception return */
mrs x0, spsr_el2
+ tbnz x0, #20, 1f
- /* if this was something else, then panic! */
- tst x0, #PSR_IL_BIT
- b.eq __hyp_panic
+ save_caller_saved_regs_vect
+ stp x29, x30, [sp, #-16]!
+ bl kvm_unexpected_el2_exception
+ ldp x29, x30, [sp], #16
+ restore_caller_saved_regs_vect
+ eret
+
+1:
/* Let's attempt a recovery from the illegal exception return */
get_vcpu_ptr x1, x0
mov x0, #ARM_EXCEPTION_IL
@@ -157,27 +187,14 @@ el2_sync:
el2_error:
- ldp x0, x1, [sp], #16
+ save_caller_saved_regs_vect
+ stp x29, x30, [sp, #-16]!
+
+ bl kvm_unexpected_el2_exception
+
+ ldp x29, x30, [sp], #16
+ restore_caller_saved_regs_vect
- /*
- * Only two possibilities:
- * 1) Either we come from the exit path, having just unmasked
- * PSTATE.A: change the return code to an EL2 fault, and
- * carry on, as we're already in a sane state to handle it.
- * 2) Or we come from anywhere else, and that's a bug: we panic.
- *
- * For (1), x0 contains the original return code and x1 doesn't
- * contain anything meaningful at that stage. We can reuse them
- * as temp registers.
- * For (2), who cares?
- */
- mrs x0, elr_el2
- adr x1, abort_guest_exit_start
- cmp x0, x1
- adr x1, abort_guest_exit_end
- ccmp x0, x1, #4, ne
- b.ne __hyp_panic
- mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
eret
sb
diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
index 0297dc63988c..5e28ea6aa097 100644
--- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
@@ -21,70 +21,70 @@
#define save_debug(ptr,reg,nr) \
switch (nr) { \
case 15: ptr[15] = read_debug(reg, 15); \
- /* Fall through */ \
+ fallthrough; \
case 14: ptr[14] = read_debug(reg, 14); \
- /* Fall through */ \
+ fallthrough; \
case 13: ptr[13] = read_debug(reg, 13); \
- /* Fall through */ \
+ fallthrough; \
case 12: ptr[12] = read_debug(reg, 12); \
- /* Fall through */ \
+ fallthrough; \
case 11: ptr[11] = read_debug(reg, 11); \
- /* Fall through */ \
+ fallthrough; \
case 10: ptr[10] = read_debug(reg, 10); \
- /* Fall through */ \
+ fallthrough; \
case 9: ptr[9] = read_debug(reg, 9); \
- /* Fall through */ \
+ fallthrough; \
case 8: ptr[8] = read_debug(reg, 8); \
- /* Fall through */ \
+ fallthrough; \
case 7: ptr[7] = read_debug(reg, 7); \
- /* Fall through */ \
+ fallthrough; \
case 6: ptr[6] = read_debug(reg, 6); \
- /* Fall through */ \
+ fallthrough; \
case 5: ptr[5] = read_debug(reg, 5); \
- /* Fall through */ \
+ fallthrough; \
case 4: ptr[4] = read_debug(reg, 4); \
- /* Fall through */ \
+ fallthrough; \
case 3: ptr[3] = read_debug(reg, 3); \
- /* Fall through */ \
+ fallthrough; \
case 2: ptr[2] = read_debug(reg, 2); \
- /* Fall through */ \
+ fallthrough; \
case 1: ptr[1] = read_debug(reg, 1); \
- /* Fall through */ \
+ fallthrough; \
default: ptr[0] = read_debug(reg, 0); \
}
#define restore_debug(ptr,reg,nr) \
switch (nr) { \
case 15: write_debug(ptr[15], reg, 15); \
- /* Fall through */ \
+ fallthrough; \
case 14: write_debug(ptr[14], reg, 14); \
- /* Fall through */ \
+ fallthrough; \
case 13: write_debug(ptr[13], reg, 13); \
- /* Fall through */ \
+ fallthrough; \
case 12: write_debug(ptr[12], reg, 12); \
- /* Fall through */ \
+ fallthrough; \
case 11: write_debug(ptr[11], reg, 11); \
- /* Fall through */ \
+ fallthrough; \
case 10: write_debug(ptr[10], reg, 10); \
- /* Fall through */ \
+ fallthrough; \
case 9: write_debug(ptr[9], reg, 9); \
- /* Fall through */ \
+ fallthrough; \
case 8: write_debug(ptr[8], reg, 8); \
- /* Fall through */ \
+ fallthrough; \
case 7: write_debug(ptr[7], reg, 7); \
- /* Fall through */ \
+ fallthrough; \
case 6: write_debug(ptr[6], reg, 6); \
- /* Fall through */ \
+ fallthrough; \
case 5: write_debug(ptr[5], reg, 5); \
- /* Fall through */ \
+ fallthrough; \
case 4: write_debug(ptr[4], reg, 4); \
- /* Fall through */ \
+ fallthrough; \
case 3: write_debug(ptr[3], reg, 3); \
- /* Fall through */ \
+ fallthrough; \
case 2: write_debug(ptr[2], reg, 2); \
- /* Fall through */ \
+ fallthrough; \
case 1: write_debug(ptr[1], reg, 1); \
- /* Fall through */ \
+ fallthrough; \
default: write_debug(ptr[0], reg, 0); \
}
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 426ef65601dd..5b6b8fa00f0a 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -17,6 +17,7 @@
#include <asm/barrier.h>
#include <asm/cpufeature.h>
+#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
@@ -29,6 +30,9 @@
extern const char __hyp_panic_string[];
+extern struct exception_table_entry __start___kvm_ex_table;
+extern struct exception_table_entry __stop___kvm_ex_table;
+
/* Check whether the FP regs were dirtied while in the host-side run loop: */
static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
{
@@ -142,10 +146,10 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
* saved the guest context yet, and we may return early...
*/
par = read_sysreg(par_el1);
- asm volatile("at s1e1r, %0" : : "r" (far));
- isb();
-
- tmp = read_sysreg(par_el1);
+ if (!__kvm_at("s1e1r", far))
+ tmp = read_sysreg(par_el1);
+ else
+ tmp = SYS_PAR_EL1_F; /* back to the guest */
write_sysreg(par, par_el1);
if (unlikely(tmp & SYS_PAR_EL1_F))
@@ -508,4 +512,31 @@ static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
#endif
}
+static inline void __kvm_unexpected_el2_exception(void)
+{
+ unsigned long addr, fixup;
+ struct kvm_cpu_context *host_ctxt;
+ struct exception_table_entry *entry, *end;
+ unsigned long elr_el2 = read_sysreg(elr_el2);
+
+ entry = hyp_symbol_addr(__start___kvm_ex_table);
+ end = hyp_symbol_addr(__stop___kvm_ex_table);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+
+ while (entry < end) {
+ addr = (unsigned long)&entry->insn + entry->insn;
+ fixup = (unsigned long)&entry->fixup + entry->fixup;
+
+ if (addr != elr_el2) {
+ entry++;
+ continue;
+ }
+
+ write_sysreg(fixup, elr_el2);
+ return;
+ }
+
+ hyp_panic(host_ctxt);
+}
+
#endif /* __ARM64_KVM_HYP_SWITCH_H__ */
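
A hedged model of the lookup __kvm_unexpected_el2_exception() performs. arm64 extable entries store 32-bit self-relative offsets (the ".long (from - .), (to - .)" emitted by __KVM_EXTABLE), so both fields are turned back into absolute addresses before being compared with ELR_EL2; on a match the fixup address is what gets written back to ELR_EL2, otherwise the hypervisor panics. The struct and function names below are illustrative, not part of the patch.

struct demo_extable_entry {
        int insn;       /* self-relative offset of the instruction allowed to fault */
        int fixup;      /* self-relative offset of the code to resume at */
};

static unsigned long demo_lookup(struct demo_extable_entry *entry,
                                 struct demo_extable_entry *end,
                                 unsigned long elr_el2)
{
        for (; entry < end; entry++) {
                unsigned long addr  = (unsigned long)&entry->insn + entry->insn;
                unsigned long fixup = (unsigned long)&entry->fixup + entry->fixup;

                if (addr == elr_el2)
                        return fixup;   /* caller writes this back to ELR_EL2 */
        }
        return 0;                       /* no entry: genuinely unexpected, panic */
}
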
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 341be2f2f312..0970442d2dbc 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -270,3 +270,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
read_sysreg(hpfar_el2), par, vcpu);
unreachable();
}
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+ return __kvm_unexpected_el2_exception();
+}
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 5a0073511efb..452f4cacd674 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -340,10 +340,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
case 7:
cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
- /* Fall through */
+ fallthrough;
case 6:
cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
- /* Fall through */
+ fallthrough;
default:
cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
}
@@ -352,10 +352,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
case 7:
cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
- /* Fall through */
+ fallthrough;
case 6:
cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
- /* Fall through */
+ fallthrough;
default:
cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
}
@@ -373,10 +373,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
case 7:
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
- /* Fall through */
+ fallthrough;
case 6:
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
- /* Fall through */
+ fallthrough;
default:
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
}
@@ -385,10 +385,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
case 7:
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
- /* Fall through */
+ fallthrough;
case 6:
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
- /* Fall through */
+ fallthrough;
default:
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
}
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index c52d714e0d75..c1da4f86ccac 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -217,3 +217,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
__hyp_call_panic(spsr, elr, par, host_ctxt);
unreachable();
}
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+ return __kvm_unexpected_el2_exception();
+}
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index a206655a39a5..9b11c096a042 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -45,7 +45,7 @@ static u32 get_cpu_asid_bits(void)
default:
pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
smp_processor_id(), fld);
- /* Fallthrough */
+ fallthrough;
case 0:
asid = 8;
break;
diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c
index e456652facce..d05c78eace1b 100644
--- a/arch/c6x/kernel/signal.c
+++ b/arch/c6x/kernel/signal.c
@@ -220,7 +220,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
regs->a4 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
regs->a4 = regs->orig_a4;
@@ -252,7 +252,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs,
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->a4 = regs->orig_a4;
regs->pc -= 4;
diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c
index 9452d6570b7e..970895df75ec 100644
--- a/arch/csky/kernel/signal.c
+++ b/arch/csky/kernel/signal.c
@@ -194,7 +194,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->a0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->a0 = regs->orig_a0;
regs->pc -= TRAP0_SIZE;
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index 38d335488a54..69e68949787f 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -227,7 +227,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka)
regs->er0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
regs->er0 = regs->orig_er0;
diff --git a/arch/hexagon/kernel/module.c b/arch/hexagon/kernel/module.c
index cf99fb79a124..cb3bf19b0640 100644
--- a/arch/hexagon/kernel/module.c
+++ b/arch/hexagon/kernel/module.c
@@ -120,7 +120,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
}
case R_HEXAGON_HI16:
value = (value>>16) & 0xffff;
- /* fallthrough */
+ fallthrough;
case R_HEXAGON_LO16:
*location &= ~0x00c03fff;
*location |= value & 0x3fff;
diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c
index d48864c48e5a..94cc7ff52dce 100644
--- a/arch/hexagon/kernel/signal.c
+++ b/arch/hexagon/kernel/signal.c
@@ -155,7 +155,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->r00 = -EINTR;
break;
}
- /* Fall through */
+ fallthrough;
case -ERESTARTNOINTR:
regs->r06 = regs->syscall_nr;
pt_set_elr(regs, pt_elr(regs) - 4);
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index bec762a9b418..fec70d662d0c 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -163,7 +163,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
case DIE_INIT_MONARCH_LEAVE:
if (!kdump_freeze_monarch)
break;
- /* fall through */
+ fallthrough;
case DIE_INIT_SLAVE_LEAVE:
case DIE_INIT_MONARCH_ENTER:
case DIE_MCA_RENDZVOUS_LEAVE:
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 1a42ba885188..00a496cb346f 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -654,7 +654,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
}
} else if (!is_internal(mod, val))
val = get_plt(mod, location, val, &ok);
- /* FALL THROUGH */
+ fallthrough;
default:
val -= bundle(location);
break;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 971f166873aa..0dc3611e7971 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -3472,7 +3472,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
break;
case PFM_CTX_LOADED:
if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
- /* fall through */
+ fallthrough;
case PFM_CTX_UNLOADED:
case PFM_CTX_ZOMBIE:
DPRINT(("invalid state=%d\n", state));
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index d07ed65c9c6e..e67b22fc3c60 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -374,7 +374,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
/* note: scr->pt.r10 is already -1 */
break;
}
- /*FALLTHRU*/
+ fallthrough;
case ERESTARTNOINTR:
ia64_decrement_ip(&scr->pt);
restart = 0; /* don't restart twice if handle_signal() fails... */
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 2d4e65ba5c3e..6c1a8951dfbb 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -1431,7 +1431,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (u.insn.x)
/* oops, really a semaphore op (cmpxchg, etc) */
goto failure;
- /*FALLTHRU*/
+ fallthrough;
case LDS_IMM_OP:
case LDSA_IMM_OP:
case LDFS_OP:
@@ -1459,7 +1459,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (u.insn.x)
/* oops, really a semaphore op (cmpxchg, etc) */
goto failure;
- /*FALLTHRU*/
+ fallthrough;
case LD_IMM_OP:
case LDA_IMM_OP:
case LDBIAS_IMM_OP:
@@ -1475,7 +1475,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (u.insn.x)
/* oops, really a semaphore op (cmpxchg, etc) */
goto failure;
- /*FALLTHRU*/
+ fallthrough;
case ST_IMM_OP:
case STREL_IMM_OP:
ret = emulate_store_int(ifa, u.insn, regs);
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index 7601fe0622d2..6bd64c35e691 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -324,7 +324,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
return 0;
}
}
- /* fall through */
+ fallthrough;
case UNW_NAT_NONE:
dummy_nat = 0;
nat_addr = &dummy_nat;
diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c
index 37091898adb3..5e0e682f9c61 100644
--- a/arch/m68k/atari/atakeyb.c
+++ b/arch/m68k/atari/atakeyb.c
@@ -207,7 +207,7 @@ repeat:
self_test_last_rcv = jiffies;
break;
}
- /* FALL THROUGH */
+ fallthrough;
default:
break_flag = scancode & BREAK_MASK;
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index fc034fd19798..a98fca977073 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -1067,7 +1067,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
regs->d0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
regs->d0 = regs->orig_d0;
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 5c9f3a2d6538..a621fcc1a576 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -1018,7 +1018,7 @@ int __init mac_platform_init(void)
*/
platform_device_register_simple("mac_scsi", 1,
mac_scsi_duo_rsrc, ARRAY_SIZE(mac_scsi_duo_rsrc));
- /* fall through */
+ fallthrough;
case MAC_SCSI_OLD:
/* Addresses from Developer Notes for Duo System,
* PowerBook 180 & 160, 140 & 170, Macintosh IIsi
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index 1f0fad2a98a0..ac77d73af19a 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -370,7 +370,7 @@ void via_nubus_irq_startup(int irq)
/* Allow NuBus slots 9 through F. */
via2[vDirA] &= 0x80 | ~(1 << irq_idx);
}
- /* fall through */
+ fallthrough;
case MAC_VIA_IICI:
via_irq_enable(irq);
break;
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 795f483b1050..ef46e77e97a5 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -118,7 +118,7 @@ good_area:
pr_debug("do_page_fault: good_area\n");
switch (error_code & 3) {
default: /* 3: write, present */
- /* fall through */
+ fallthrough;
case 2: /* write, not present */
if (!(vma->vm_flags & VM_WRITE))
goto acc_err;
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 65bf5fd8d473..4a96b59f0bee 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -249,7 +249,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
regs->r3 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
/* offset of 4 bytes to re-execute trap (brki) instruction */
diff --git a/arch/mips/include/asm/unroll.h b/arch/mips/include/asm/unroll.h
index 7dd4a80e05d6..6f4ac854b12d 100644
--- a/arch/mips/include/asm/unroll.h
+++ b/arch/mips/include/asm/unroll.h
@@ -28,38 +28,38 @@
BUILD_BUG_ON(!__builtin_constant_p(times)); \
\
switch (times) { \
- case 32: fn(__VA_ARGS__); /* fall through */ \
- case 31: fn(__VA_ARGS__); /* fall through */ \
- case 30: fn(__VA_ARGS__); /* fall through */ \
- case 29: fn(__VA_ARGS__); /* fall through */ \
- case 28: fn(__VA_ARGS__); /* fall through */ \
- case 27: fn(__VA_ARGS__); /* fall through */ \
- case 26: fn(__VA_ARGS__); /* fall through */ \
- case 25: fn(__VA_ARGS__); /* fall through */ \
- case 24: fn(__VA_ARGS__); /* fall through */ \
- case 23: fn(__VA_ARGS__); /* fall through */ \
- case 22: fn(__VA_ARGS__); /* fall through */ \
- case 21: fn(__VA_ARGS__); /* fall through */ \
- case 20: fn(__VA_ARGS__); /* fall through */ \
- case 19: fn(__VA_ARGS__); /* fall through */ \
- case 18: fn(__VA_ARGS__); /* fall through */ \
- case 17: fn(__VA_ARGS__); /* fall through */ \
- case 16: fn(__VA_ARGS__); /* fall through */ \
- case 15: fn(__VA_ARGS__); /* fall through */ \
- case 14: fn(__VA_ARGS__); /* fall through */ \
- case 13: fn(__VA_ARGS__); /* fall through */ \
- case 12: fn(__VA_ARGS__); /* fall through */ \
- case 11: fn(__VA_ARGS__); /* fall through */ \
- case 10: fn(__VA_ARGS__); /* fall through */ \
- case 9: fn(__VA_ARGS__); /* fall through */ \
- case 8: fn(__VA_ARGS__); /* fall through */ \
- case 7: fn(__VA_ARGS__); /* fall through */ \
- case 6: fn(__VA_ARGS__); /* fall through */ \
- case 5: fn(__VA_ARGS__); /* fall through */ \
- case 4: fn(__VA_ARGS__); /* fall through */ \
- case 3: fn(__VA_ARGS__); /* fall through */ \
- case 2: fn(__VA_ARGS__); /* fall through */ \
- case 1: fn(__VA_ARGS__); /* fall through */ \
+ case 32: fn(__VA_ARGS__); fallthrough; \
+ case 31: fn(__VA_ARGS__); fallthrough; \
+ case 30: fn(__VA_ARGS__); fallthrough; \
+ case 29: fn(__VA_ARGS__); fallthrough; \
+ case 28: fn(__VA_ARGS__); fallthrough; \
+ case 27: fn(__VA_ARGS__); fallthrough; \
+ case 26: fn(__VA_ARGS__); fallthrough; \
+ case 25: fn(__VA_ARGS__); fallthrough; \
+ case 24: fn(__VA_ARGS__); fallthrough; \
+ case 23: fn(__VA_ARGS__); fallthrough; \
+ case 22: fn(__VA_ARGS__); fallthrough; \
+ case 21: fn(__VA_ARGS__); fallthrough; \
+ case 20: fn(__VA_ARGS__); fallthrough; \
+ case 19: fn(__VA_ARGS__); fallthrough; \
+ case 18: fn(__VA_ARGS__); fallthrough; \
+ case 17: fn(__VA_ARGS__); fallthrough; \
+ case 16: fn(__VA_ARGS__); fallthrough; \
+ case 15: fn(__VA_ARGS__); fallthrough; \
+ case 14: fn(__VA_ARGS__); fallthrough; \
+ case 13: fn(__VA_ARGS__); fallthrough; \
+ case 12: fn(__VA_ARGS__); fallthrough; \
+ case 11: fn(__VA_ARGS__); fallthrough; \
+ case 10: fn(__VA_ARGS__); fallthrough; \
+ case 9: fn(__VA_ARGS__); fallthrough; \
+ case 8: fn(__VA_ARGS__); fallthrough; \
+ case 7: fn(__VA_ARGS__); fallthrough; \
+ case 6: fn(__VA_ARGS__); fallthrough; \
+ case 5: fn(__VA_ARGS__); fallthrough; \
+ case 4: fn(__VA_ARGS__); fallthrough; \
+ case 3: fn(__VA_ARGS__); fallthrough; \
+ case 2: fn(__VA_ARGS__); fallthrough; \
+ case 1: fn(__VA_ARGS__); fallthrough; \
case 0: break; \
\
default: \
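
The unroll.h hunk above is a Duff's-device style construct: execution enters the switch at 'times' and deliberately falls through every lower case so fn() runs exactly 'times' times with no loop overhead, which is why each case now carries an explicit fallthrough. A simplified, hedged model is below; unroll4 is an invented name, and fallthrough is the pseudo-keyword from <linux/compiler_attributes.h> sketched near the top of this diff.

/* Run fn(...) exactly 'times' (0..4) times by falling through the switch. */
#define unroll4(times, fn, ...)                         \
        do {                                            \
                switch (times) {                        \
                case 4: fn(__VA_ARGS__); fallthrough;   \
                case 3: fn(__VA_ARGS__); fallthrough;   \
                case 2: fn(__VA_ARGS__); fallthrough;   \
                case 1: fn(__VA_ARGS__); fallthrough;   \
                case 0: break;                          \
                }                                       \
        } while (0)
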
diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c
index 62bdafbc53f4..9edd7ed7d7bf 100644
--- a/arch/nds32/kernel/fpu.c
+++ b/arch/nds32/kernel/fpu.c
@@ -45,7 +45,7 @@ void save_fpu(struct task_struct *tsk)
: /* no output */
: "r" (&tsk->thread.fpu)
: "memory");
- /* fall through */
+ fallthrough;
case SP32_DP16_reg:
asm volatile ("fsdi $fd15, [%0+0x78]\n\t"
"fsdi $fd14, [%0+0x70]\n\t"
@@ -58,7 +58,7 @@ void save_fpu(struct task_struct *tsk)
: /* no output */
: "r" (&tsk->thread.fpu)
: "memory");
- /* fall through */
+ fallthrough;
case SP16_DP8_reg:
asm volatile ("fsdi $fd7, [%0+0x38]\n\t"
"fsdi $fd6, [%0+0x30]\n\t"
@@ -67,7 +67,7 @@ void save_fpu(struct task_struct *tsk)
: /* no output */
: "r" (&tsk->thread.fpu)
: "memory");
- /* fall through */
+ fallthrough;
case SP8_DP4_reg:
asm volatile ("fsdi $fd3, [%1+0x18]\n\t"
"fsdi $fd2, [%1+0x10]\n\t"
@@ -108,7 +108,7 @@ void load_fpu(const struct fpu_struct *fpregs)
"fldi $fd16, [%0+0x80]\n\t"
: /* no output */
: "r" (fpregs));
- /* fall through */
+ fallthrough;
case SP32_DP16_reg:
asm volatile ("fldi $fd15, [%0+0x78]\n\t"
"fldi $fd14, [%0+0x70]\n\t"
@@ -120,7 +120,7 @@ void load_fpu(const struct fpu_struct *fpregs)
"fldi $fd8, [%0+0x40]\n\t"
: /* no output */
: "r" (fpregs));
- /* fall through */
+ fallthrough;
case SP16_DP8_reg:
asm volatile ("fldi $fd7, [%0+0x38]\n\t"
"fldi $fd6, [%0+0x30]\n\t"
@@ -128,7 +128,7 @@ void load_fpu(const struct fpu_struct *fpregs)
"fldi $fd4, [%0+0x20]\n\t"
: /* no output */
: "r" (fpregs));
- /* fall through */
+ fallthrough;
case SP8_DP4_reg:
asm volatile ("fldi $fd3, [%1+0x18]\n\t"
"fldi $fd2, [%1+0x10]\n\t"
diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c
index 330b19fcd990..36e25a410bb0 100644
--- a/arch/nds32/kernel/signal.c
+++ b/arch/nds32/kernel/signal.c
@@ -316,7 +316,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->uregs[0] = -EINTR;
break;
}
- /* Else, fall through */
+ fallthrough;
case -ERESTARTNOINTR:
regs->uregs[0] = regs->orig_r0;
regs->ipc -= 4;
@@ -361,7 +361,7 @@ static void do_signal(struct pt_regs *regs)
switch (regs->uregs[0]) {
case -ERESTART_RESTARTBLOCK:
regs->uregs[15] = __NR_restart_syscall;
- /* Fall through */
+ fallthrough;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
index 97804f21a40c..c779364f0cd0 100644
--- a/arch/openrisc/kernel/signal.c
+++ b/arch/openrisc/kernel/signal.c
@@ -244,7 +244,7 @@ int do_signal(struct pt_regs *regs, int syscall)
switch (retval) {
case -ERESTART_RESTARTBLOCK:
restart = -2;
- /* Fall through */
+ fallthrough;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 5df5d4cd5d4c..3c037fc96038 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -502,7 +502,7 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
regs->gr[28] = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
check_syscallno_in_delay_branch(regs);
break;
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 43875c289723..a52c7abf2ca4 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -437,7 +437,6 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
break;
default:
- /* Fall through */
break;
}
@@ -644,12 +643,12 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
case 15:
/* Data TLB miss fault/Data page fault */
- /* Fall through */
+ fallthrough;
case 16:
/* Non-access instruction TLB miss fault */
/* The instruction TLB entry needed for the target address of the FIC
is absent, and hardware can't find it, so we get to cleanup */
- /* Fall through */
+ fallthrough;
case 17:
/* Non-access data TLB miss fault/Non-access data page fault */
/* FIXME:
@@ -673,7 +672,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
handle_unaligned(regs);
return;
}
- /* Fall Through */
+ fallthrough;
case 26:
/* PCXL: Data memory access rights trap */
fault_address = regs->ior;
@@ -683,7 +682,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
case 19:
/* Data memory break trap */
regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
- /* fall thru */
+ fallthrough;
case 21:
/* Page reference trap */
handle_gdb_break(regs, TRAP_HWBKPT);
@@ -730,7 +729,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
}
mmap_read_unlock(current->mm);
}
- /* Fall Through */
+ fallthrough;
case 27:
/* Data memory protection ID trap */
if (code == 27 && !user_mode(regs) &&
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 4bfe2da9fbe3..716960f5d92e 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -67,7 +67,7 @@ parisc_acctyp(unsigned long code, unsigned int inst)
case 0x30000000: /* coproc2 */
if (bit22set(inst))
return VM_WRITE;
- /* fall through */
+ fallthrough;
case 0x0: /* indexed/memory management */
if (bit22set(inst)) {
@@ -370,7 +370,7 @@ bad_area:
}
/* probably address is outside of mapped file */
- /* fall through */
+ fallthrough;
case 17: /* NA data TLB miss / page fault */
case 18: /* Unaligned access - PCXS only */
signo = SIGBUS;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1f48bbfb3ce9..65bed1fdeaad 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -860,6 +860,18 @@ config PPC_SUBPAGE_PROT
If unsure, say N here.
+config PPC_PROT_SAO_LPAR
+ bool "Support PROT_SAO mappings in LPARs"
+ depends on PPC_BOOK3S_64
+ help
+ This option adds support for PROT_SAO mappings from userspace
+ inside LPARs on supported CPUs.
+
+ This may cause issues when performing guest migration from
+ a CPU that supports SAO to one that does not.
+
+ If unsure, say N here.
+
config PPC_COPRO_BASE
bool
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 6de56c3b33c4..495fc0ccb453 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -20,13 +20,9 @@
#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED 0x00008 /* kernel access only */
-
-#define _PAGE_CACHE_CTL 0x00030 /* Bits for the folowing cache modes */
- /* No bits set is normal cacheable memory */
- /* 0x00010 unused, is SAO bit on radix POWER9 */
+#define _PAGE_SAO 0x00010 /* Strong access order */
#define _PAGE_NON_IDEMPOTENT 0x00020 /* non idempotent memory */
#define _PAGE_TOLERANT 0x00030 /* tolerant memory, cache inhibited */
-
#define _PAGE_DIRTY 0x00080 /* C: page changed */
#define _PAGE_ACCESSED 0x00100 /* R: page referenced */
/*
@@ -828,6 +824,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
return hash__set_pte_at(mm, addr, ptep, pte, percpu);
}
+#define _PAGE_CACHE_CTL (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)
+
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index e005b4581023..32a15dc49e8c 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -196,7 +196,7 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_SPURR LONG_ASM_CONST(0x0000000001000000)
#define CPU_FTR_DSCR LONG_ASM_CONST(0x0000000002000000)
#define CPU_FTR_VSX LONG_ASM_CONST(0x0000000004000000)
-// Free LONG_ASM_CONST(0x0000000008000000)
+#define CPU_FTR_SAO LONG_ASM_CONST(0x0000000008000000)
#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000000010000000)
#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0000000020000000)
#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0000000040000000)
@@ -441,7 +441,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | CPU_FTR_ASYM_SMT | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | \
CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX )
@@ -450,7 +450,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
@@ -461,7 +461,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
@@ -479,7 +479,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 7c07728af300..7cb6d18f5cd6 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -13,20 +13,43 @@
#include <linux/pkeys.h>
#include <asm/cpu_has_feature.h>
-#ifdef CONFIG_PPC_MEM_KEYS
static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
unsigned long pkey)
{
- return pkey_to_vmflag_bits(pkey);
+#ifdef CONFIG_PPC_MEM_KEYS
+ return (((prot & PROT_SAO) ? VM_SAO : 0) | pkey_to_vmflag_bits(pkey));
+#else
+ return ((prot & PROT_SAO) ? VM_SAO : 0);
+#endif
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
{
- return __pgprot(vmflag_to_pte_pkey_bits(vm_flags));
+#ifdef CONFIG_PPC_MEM_KEYS
+ return (vm_flags & VM_SAO) ?
+ __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :
+ __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));
+#else
+ return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+#endif
}
#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
-#endif
+
+static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
+{
+ if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
+ return false;
+ if (prot & PROT_SAO) {
+ if (!cpu_has_feature(CPU_FTR_SAO))
+ return false;
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
+ !IS_ENABLED(CONFIG_PPC_PROT_SAO_LPAR))
+ return false;
+ }
+ return true;
+}
+#define arch_validate_prot arch_validate_prot
#endif /* CONFIG_PPC64 */
#endif /* _ASM_POWERPC_MMAN_H */
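
Taken together, the mman.h hunks re-expose PROT_SAO to userspace while gating it in arch_validate_prot(): the flag is refused unless the CPU advertises CPU_FTR_SAO, and inside an LPAR it additionally requires CONFIG_PPC_PROT_SAO_LPAR. A hypothetical userspace sketch of the caller's view follows (not part of the patch; mprotect() is used because arch_validate_prot() is checked on that path, so a rejection surfaces as EINVAL).

/* Hypothetical powerpc userspace sketch: ask for Strong Access Ordering
 * on an existing mapping and cope with the kernel refusing PROT_SAO
 * (older CPU, or an LPAR without PPC_PROT_SAO_LPAR).
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef PROT_SAO
# define PROT_SAO 0x10		/* matches the uapi value restored above */
#endif

int main(void)
{
	size_t len = 64 * 1024;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	if (mprotect(p, len, PROT_READ | PROT_WRITE | PROT_SAO) == -1)
		fprintf(stderr, "PROT_SAO not available: %s\n",
			strerror(errno));	/* typically EINVAL */
	else
		puts("mapping is now strongly ordered");

	munmap(p, len);
	return 0;
}
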
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 59ee9fa4ae09..6cb8aa357191 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -82,6 +82,8 @@
*/
#include <asm/nohash/pte-book3e.h>
+#define _PAGE_SAO 0
+
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
/*
diff --git a/arch/powerpc/include/uapi/asm/mman.h b/arch/powerpc/include/uapi/asm/mman.h
index 3a700351feca..c0c737215b00 100644
--- a/arch/powerpc/include/uapi/asm/mman.h
+++ b/arch/powerpc/include/uapi/asm/mman.h
@@ -11,7 +11,7 @@
#include <asm-generic/mman-common.h>
-#define PROT_SAO 0x10 /* Unsupported since v5.9 */
+#define PROT_SAO 0x10 /* Strong Access Ordering */
#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 8dc46f38680b..f204ad79b6b5 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -653,7 +653,7 @@ static struct dt_cpu_feature_match __initdata
{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
{"processor-utilization-of-resources-register", feat_enable_purr, 0},
{"no-execute", feat_enable, 0},
- /* strong-access-ordering is unused */
+ {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
{"cache-inhibited-large-page", feat_enable_large_ci, 0},
{"coprocessor-icswx", feat_enable, 0},
{"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 33a42e42c56f..733e40eba4eb 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -113,6 +113,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
ld r11,exception_marker@toc(r2)
std r11,-16(r10) /* "regshere" marker */
+BEGIN_FTR_SECTION
+ HMT_MEDIUM
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
/*
* RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
* would clobber syscall parameters. Also we always enter with IRQs
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 016bd831908e..73a57043ee66 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -548,7 +548,7 @@ void notrace restore_math(struct pt_regs *regs)
* are live for the user thread).
*/
if ((!(msr & MSR_FP)) && should_restore_fp())
- new_msr |= MSR_FP | current->thread.fpexc_mode;
+ new_msr |= MSR_FP;
if ((!(msr & MSR_VEC)) && should_restore_altivec())
new_msr |= MSR_VEC;
@@ -559,11 +559,17 @@ void notrace restore_math(struct pt_regs *regs)
}
if (new_msr) {
+ unsigned long fpexc_mode = 0;
+
msr_check_and_set(new_msr);
- if (new_msr & MSR_FP)
+ if (new_msr & MSR_FP) {
do_restore_fp();
+ // This also covers VSX, because VSX implies FP
+ fpexc_mode = current->thread.fpexc_mode;
+ }
+
if (new_msr & MSR_VEC)
do_restore_altivec();
@@ -572,7 +578,7 @@ void notrace restore_math(struct pt_regs *regs)
msr_check_and_clear(new_msr);
- regs->msr |= new_msr;
+ regs->msr |= new_msr | fpexc_mode;
}
}
#endif
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 890a71c5293e..c663e7ba801f 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -232,6 +232,8 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
rflags |= HPTE_R_I;
else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
rflags |= (HPTE_R_I | HPTE_R_G);
+ else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+ rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
else
/*
* Add memory coherence if cache inhibited is not set
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 16d09b36fe06..78d61f97371e 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -475,7 +475,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
true_cond = COND_NE;
- /* Fall through */
+ fallthrough;
cond_branch:
/* same targets, can avoid doing the test :) */
if (filter[i].jt == filter[i].jf) {
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 93d20e1ed845..08643cba1494 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1557,9 +1557,16 @@ nocheck:
ret = 0;
out:
if (has_branch_stack(event)) {
- power_pmu_bhrb_enable(event);
- cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
- event->attr.branch_sample_type);
+ u64 bhrb_filter = -1;
+
+ if (ppmu->bhrb_filter_map)
+ bhrb_filter = ppmu->bhrb_filter_map(
+ event->attr.branch_sample_type);
+
+ if (bhrb_filter != -1) {
+ cpuhw->bhrb_filter = bhrb_filter;
+ power_pmu_bhrb_enable(event);
+ }
}
perf_pmu_enable(event->pmu);
@@ -1881,7 +1888,6 @@ static int power_pmu_event_init(struct perf_event *event)
int n;
int err;
struct cpu_hw_events *cpuhw;
- u64 bhrb_filter;
if (!ppmu)
return -ENOENT;
@@ -1987,7 +1993,10 @@ static int power_pmu_event_init(struct perf_event *event)
err = power_check_constraints(cpuhw, events, cflags, n + 1);
if (has_branch_stack(event)) {
- bhrb_filter = ppmu->bhrb_filter_map(
+ u64 bhrb_filter = -1;
+
+ if (ppmu->bhrb_filter_map)
+ bhrb_filter = ppmu->bhrb_filter_map(
event->attr.branch_sample_type);
if (bhrb_filter == -1) {
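
Both core-book3s.c hunks apply one defensive pattern: treat ppmu->bhrb_filter_map as optional, and only enable the branch history buffer once the callback exists and has produced a usable filter. A generic standalone sketch of that pattern is shown below; all names are illustrative and nothing here is kernel API.

/* Generic sketch: an optional callback maps a request to a hardware
 * filter; the feature is enabled only when the callback is present and
 * returned a valid value.
 */
#include <stdint.h>
#include <stdio.h>

struct pmu_ops {
	/* optional: may be NULL on PMUs without branch-stack filtering */
	int64_t (*filter_map)(uint64_t sample_type);
};

static int configure_branch_stack(const struct pmu_ops *ops,
				  uint64_t sample_type, int64_t *out)
{
	int64_t filter = -1;

	if (ops->filter_map)
		filter = ops->filter_map(sample_type);

	if (filter == -1)
		return -1;	/* unsupported: leave the feature disabled */

	*out = filter;
	return 0;		/* caller may now enable the branch stack */
}

static int64_t demo_map(uint64_t sample_type)
{
	return (int64_t)(sample_type & 0xff);
}

int main(void)
{
	int64_t filter;
	struct pmu_ops ops = { .filter_map = demo_map };

	if (configure_branch_stack(&ops, 0x12, &filter) == 0)
		printf("filter=%lld\n", (long long)filter);
	return 0;
}
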
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index a45d694a5d5d..62d0b54086f8 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -1289,7 +1289,7 @@ static int trace_imc_prepare_sample(struct trace_imc_data *mem,
header->misc = 0;
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- switch (IMC_TRACE_RECORD_VAL_HVPR(mem->val)) {
+ switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) {
case 0:/* when MSR HV and PR not set in the trace-record */
header->misc |= PERF_RECORD_MISC_GUEST_KERNEL;
break;
@@ -1297,7 +1297,7 @@ static int trace_imc_prepare_sample(struct trace_imc_data *mem,
header->misc |= PERF_RECORD_MISC_GUEST_USER;
break;
case 2: /* MSR HV is 1 and PR is 0 */
- header->misc |= PERF_RECORD_MISC_HYPERVISOR;
+ header->misc |= PERF_RECORD_MISC_KERNEL;
break;
case 3: /* MSR HV is 1 and PR is 1 */
header->misc |= PERF_RECORD_MISC_USER;
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 87737ec86d39..1dc9d3c81872 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -36,7 +36,7 @@ config PPC_BOOK3S_6xx
select PPC_HAVE_PMU_SUPPORT
select PPC_HAVE_KUEP
select PPC_HAVE_KUAP
- select HAVE_ARCH_VMAP_STACK
+ select HAVE_ARCH_VMAP_STACK if !ADB_PMU
config PPC_BOOK3S_601
bool "PowerPC 601"
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 77513a80cef9..345ab062b21a 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -1223,7 +1223,7 @@ static void __init pnv_probe_idle_states(void)
return;
}
- if (pvr_version_is(PVR_POWER9))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
pnv_power9_idle_init();
for (i = 0; i < nr_pnv_idle_states; i++)
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 17ba190e84a5..e996e08f1061 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -250,7 +250,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->a0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->a0 = regs->orig_a0;
regs->epc -= 0x4;
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index bc5f2204693f..579575f9cdae 100644
--- a/arch/riscv/net/bpf_jit_comp32.c
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -1020,7 +1020,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_zext64(dst, ctx);
break;
}
- /* Fallthrough. */
+ fallthrough;
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU | BPF_SUB | BPF_X:
@@ -1079,7 +1079,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case 16:
emit(rv_slli(lo(rd), lo(rd), 16), ctx);
emit(rv_srli(lo(rd), lo(rd), 16), ctx);
- /* Fallthrough. */
+ fallthrough;
case 32:
if (!ctx->prog->aux->verifier_zext)
emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 50b4ce8cddfd..918f0ba4f4d2 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -29,7 +29,7 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ old__, new__, prev__; \
pcp_op_T__ *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
prev__ = *ptr__; \
do { \
@@ -37,7 +37,7 @@
new__ = old__ op (val); \
prev__ = cmpxchg(ptr__, old__, new__); \
} while (prev__ != old__); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
new__; \
})
@@ -68,7 +68,7 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
if (__builtin_constant_p(val__) && \
((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
@@ -84,7 +84,7 @@
: [val__] "d" (val__) \
: "cc"); \
} \
- preempt_enable(); \
+ preempt_enable_notrace(); \
}
#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
@@ -95,14 +95,14 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
old__ + val__; \
})
@@ -114,14 +114,14 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
}
#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
@@ -136,10 +136,10 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ ret__; \
pcp_op_T__ *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ret__ = cmpxchg(ptr__, oval, nval); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
ret__; \
})
@@ -152,10 +152,10 @@
({ \
typeof(pcp) *ptr__; \
typeof(pcp) ret__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ret__ = xchg(ptr__, nval); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
ret__; \
})
@@ -171,11 +171,11 @@
typeof(pcp1) *p1__; \
typeof(pcp2) *p2__; \
int ret__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
p1__ = raw_cpu_ptr(&(pcp1)); \
p2__ = raw_cpu_ptr(&(pcp2)); \
ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
ret__; \
})
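
The s390 percpu.h hunk switches every preempt_disable()/preempt_enable() pair to the _notrace variants. The usual motivation for such a conversion (the specific trigger is not stated in this hunk) is that the tracing machinery itself may depend on the primitive being traced, which would recurse. The sketch below is a user-space analogy only, not the kernel implementation.

/* User-space analogy: if the trace hook needs the very primitive it is
 * hooked into, the traced variant recurses; the _notrace variant does
 * the same work without calling back into the hook.
 */
#include <stdio.h>

static unsigned long counter;

static void primitive_notrace(void)
{
	counter++;			/* the actual work, no hook */
}

static void trace_hook(const char *caller)
{
	primitive_notrace();		/* safe: breaks the recursion */
	printf("trace: %s -> counter=%lu\n", caller, counter);
}

static void primitive(void)
{
	trace_hook(__func__);		/* instrumentation */
	counter++;			/* the actual work */
}

int main(void)
{
	primitive();
	return 0;
}
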
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 1aed1a4dfc2d..eddf71c22875 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -402,6 +402,7 @@ static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
if (!pud)
goto out;
+ p4d_populate(&init_mm, p4d, pud);
}
ret = modify_pud_table(p4d, addr, next, add, direct);
if (ret)
diff --git a/arch/sh/drivers/platform_early.c b/arch/sh/drivers/platform_early.c
index f3dc3f25b3ff..143747c45206 100644
--- a/arch/sh/drivers/platform_early.c
+++ b/arch/sh/drivers/platform_early.c
@@ -246,7 +246,7 @@ static int __init sh_early_platform_driver_probe_id(char *class_str,
case EARLY_PLATFORM_ID_ERROR:
pr_warn("%s: unable to parse %s parameter\n",
class_str, epdrv->pdrv->driver.name);
- /* fall-through */
+ fallthrough;
case EARLY_PLATFORM_ID_UNSET:
match = NULL;
break;
diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c
index 08e1af63edd9..34e25a439c81 100644
--- a/arch/sh/kernel/disassemble.c
+++ b/arch/sh/kernel/disassemble.c
@@ -486,7 +486,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
pr_cont("xd%d", rn & ~1);
break;
}
- /* else, fall through */
+ fallthrough;
case D_REG_N:
pr_cont("dr%d", rn);
break;
@@ -495,7 +495,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
pr_cont("xd%d", rm & ~1);
break;
}
- /* else, fall through */
+ fallthrough;
case D_REG_M:
pr_cont("dr%d", rm);
break;
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 0d5f3c9d52f3..e4147efa9ec6 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -266,7 +266,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
linux_regs->pc = addr;
- /* fallthrough */
+ fallthrough;
case 'D':
case 'k':
atomic_set(&kgdb_cpu_doing_single_step, -1);
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index a0fbb8427b39..4fe3f00137bc 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -418,7 +418,7 @@ handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs,
case -ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->regs[0] = save_r0;
regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
index 4843f48bfe85..774a82b0c649 100644
--- a/arch/sparc/kernel/auxio_64.c
+++ b/arch/sparc/kernel/auxio_64.c
@@ -87,7 +87,6 @@ void auxio_set_lte(int on)
__auxio_sbus_set_lte(on);
break;
case AUXIO_TYPE_EBUS:
- /* FALL-THROUGH */
default:
break;
}
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
index bfae98ab8638..23f8838dd96e 100644
--- a/arch/sparc/kernel/central.c
+++ b/arch/sparc/kernel/central.c
@@ -55,7 +55,7 @@ static int clock_board_calc_nslots(struct clock_board *p)
else
return 5;
}
- /* Fallthrough */
+ fallthrough;
default:
return 4;
}
diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
index 7580775a14b9..58ad3f7de1fb 100644
--- a/arch/sparc/kernel/kgdb_32.c
+++ b/arch/sparc/kernel/kgdb_32.c
@@ -122,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->pc = addr;
linux_regs->npc = addr + 4;
}
- /* fall through */
+ fallthrough;
case 'D':
case 'k':
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index 5d6c2d287e85..177746ae2c81 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -148,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->tpc = addr;
linux_regs->tnpc = addr + 4;
}
- /* fall through */
+ fallthrough;
case 'D':
case 'k':
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index c0886b400dad..2a12c86af956 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -359,7 +359,7 @@ int __init pcr_arch_init(void)
* counter overflow interrupt so we can't make use of
* their hardware currently.
*/
- /* fallthrough */
+ fallthrough;
default:
err = -ENODEV;
goto out_unregister;
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index da8902295c8c..3df960c137f7 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -224,7 +224,7 @@ void __init of_console_init(void)
case PROMDEV_TTYB:
skip = 1;
- /* FALLTHRU */
+ fallthrough;
case PROMDEV_TTYA:
type = "serial";
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index e2c6f0abda00..e9695a06492f 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -646,7 +646,7 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
@@ -686,7 +686,7 @@ void do_signal32(struct pt_regs * regs)
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
- /* fall through */
+ fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index f1f8c8ebe641..d0e0025ee3ba 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -440,7 +440,7 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->pc -= 4;
@@ -506,7 +506,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->pc -= 4;
regs->npc -= 4;
pt_regs_clear_syscall(regs);
- /* fall through */
+ fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->pc -= 4;
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 6937339a272c..255264bcb46a 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -461,7 +461,7 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
@@ -532,7 +532,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
- /* fall through */
+ fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
index 72e560ef4a09..d5beec856146 100644
--- a/arch/sparc/math-emu/math_32.c
+++ b/arch/sparc/math-emu/math_32.c
@@ -359,7 +359,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
- /* fall through */
+ fallthrough;
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
@@ -380,7 +380,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
- /* fall through */
+ fallthrough;
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
@@ -408,13 +408,13 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
- /* fall through */
+ fallthrough;
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
return 0;
}
- /* fall through */
+ fallthrough;
case 1:
rd = (void *)&fregs[freg];
break;
diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c
index c8eabb973b86..b1dbf2fa8c0a 100644
--- a/arch/sparc/net/bpf_jit_comp_32.c
+++ b/arch/sparc/net/bpf_jit_comp_32.c
@@ -491,7 +491,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
} else {
emit_loadimm(K, r_A);
}
- /* Fallthrough */
+ fallthrough;
case BPF_RET | BPF_A:
if (seen_or_pass0) {
if (i != flen - 1) {
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 3d57c71c532e..88cd9b5c1b74 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -70,7 +70,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
PT_REGS_SYSCALL_RET(regs) = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
PT_REGS_RESTART_SYSCALL(regs);
PT_REGS_ORIG_SYSCALL(regs) = PT_REGS_SYSCALL_NR(regs);
diff --git a/arch/x86/boot/cmdline.c b/arch/x86/boot/cmdline.c
index 4ff01176c1cc..21d56ae83cdf 100644
--- a/arch/x86/boot/cmdline.c
+++ b/arch/x86/boot/cmdline.c
@@ -54,7 +54,7 @@ int __cmdline_find_option(unsigned long cmdline_ptr, const char *option, char *b
/* else */
state = st_wordcmp;
opptr = option;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if (c == '=' && !*opptr) {
@@ -129,7 +129,7 @@ int __cmdline_find_option_bool(unsigned long cmdline_ptr, const char *option)
state = st_wordcmp;
opptr = option;
wstart = pos;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if (!*opptr)
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 0048269180d5..dde7cb3724df 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -178,7 +178,7 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size,
}
*size = 0;
}
- /* Fall through */
+ fallthrough;
default:
/*
* If w/o offset, only size specified, memmap=nn[KMG] has the
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 50963472ee85..31e6887d24f1 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4682,7 +4682,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_CORE2_MEROM:
x86_add_quirk(intel_clovertown_quirk);
- /* fall through */
+ fallthrough;
case INTEL_FAM6_CORE2_MEROM_L:
case INTEL_FAM6_CORE2_PENRYN:
@@ -5062,7 +5062,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_SKYLAKE_X:
pmem = true;
- /* fall through */
+ fallthrough;
case INTEL_FAM6_SKYLAKE_L:
case INTEL_FAM6_SKYLAKE:
case INTEL_FAM6_KABYLAKE_L:
@@ -5114,7 +5114,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_ICELAKE_X:
case INTEL_FAM6_ICELAKE_D:
pmem = true;
- /* fall through */
+ fallthrough;
case INTEL_FAM6_ICELAKE_L:
case INTEL_FAM6_ICELAKE:
case INTEL_FAM6_TIGERLAKE_L:
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 63f58bdf556c..8961653c5dd2 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1268,7 +1268,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
ret = X86_BR_ZERO_CALL;
break;
}
- /* fall through */
+ fallthrough;
case 0x9a: /* call far absolute */
ret = X86_BR_CALL;
break;
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c3daf0aaa0ee..cdaab30880b9 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -239,7 +239,7 @@ void __init arch_init_ideal_nops(void)
return;
}
- /* fall through */
+ fallthrough;
default:
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 21325a4a78b9..779a89e31c4c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -800,7 +800,7 @@ static int irq_polarity(int idx)
return IOAPIC_POL_HIGH;
case MP_IRQPOL_RESERVED:
pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n");
- /* fall through */
+ fallthrough;
case MP_IRQPOL_ACTIVE_LOW:
default: /* Pointless default required due to do gcc stupidity */
return IOAPIC_POL_LOW;
@@ -848,7 +848,7 @@ static int irq_trigger(int idx)
return IOAPIC_EDGE;
case MP_IRQTRIG_RESERVED:
pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n");
- /* fall through */
+ fallthrough;
case MP_IRQTRIG_LEVEL:
default: /* Pointless default required due to do gcc stupidity */
return IOAPIC_LEVEL;
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 7bda71def557..99ee61c9ba54 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -149,7 +149,7 @@ void __init default_setup_apic_routing(void)
break;
}
/* P4 and above */
- /* fall through */
+ fallthrough;
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
def_to_bigsmp = 1;
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index c7503be92f35..57074cf3ad7c 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -248,7 +248,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
switch (leaf) {
case 1:
l1 = &l1i;
- /* fall through */
+ fallthrough;
case 0:
if (!l1->val)
return;
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 7843ab3fde09..3a44346f2276 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -199,7 +199,7 @@ static int raise_local(void)
* calling irq_enter, but the necessary
* machinery isn't exported currently.
*/
- /*FALL THROUGH*/
+ fallthrough;
case MCJ_CTX_PROCESS:
raise_exception(m, NULL);
break;
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index d8f9230d2034..abe9fe0fb851 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -193,7 +193,7 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval)
if (!atomic_sub_return(1, &cmci_storm_on_cpus))
pr_notice("CMCI storm subsided: switching to interrupt mode\n");
- /* FALLTHROUGH */
+ fallthrough;
case CMCI_STORM_SUBSIDED:
/*
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index 72182809b333..ca670919b561 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -98,7 +98,7 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
case 7:
if (size < 0x40)
break;
- /* Else, fall through */
+ fallthrough;
case 6:
case 5:
case 4:
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 8cdf29ffd95f..b98ff620ba77 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -349,7 +349,7 @@ static int arch_build_bp_info(struct perf_event *bp,
hw->len = X86_BREAKPOINT_LEN_X;
return 0;
}
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 68acd30c6b87..c2f02f308ecf 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -450,7 +450,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
linux_regs->ip = addr;
- /* fall through */
+ fallthrough;
case 'D':
case 'k':
/* clear the trace bit */
@@ -539,7 +539,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
* a system call which should be ignored
*/
return NOTIFY_DONE;
- /* fall through */
+ fallthrough;
default:
if (user_mode(regs))
return NOTIFY_DONE;
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 411af4aa7b51..baa21090c9be 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -312,7 +312,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
case 2:
if (i == 0 || i == 13)
continue; /* IRQ0 & IRQ13 not connected */
- /* fall through */
+ fallthrough;
default:
if (i == 2)
continue; /* IRQ2 is never connected */
@@ -356,7 +356,7 @@ static void __init construct_ioapic_table(int mpc_default_type)
default:
pr_err("???\nUnknown standard configuration %d\n",
mpc_default_type);
- /* fall through */
+ fallthrough;
case 1:
case 5:
memcpy(bus.bustype, "ISA ", 6);
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 5679aa3fdcb8..e7537c5440bb 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -204,7 +204,7 @@ static int set_segment_reg(struct task_struct *task,
case offsetof(struct user_regs_struct, ss):
if (unlikely(value == 0))
return -EIO;
- /* Else, fall through */
+ fallthrough;
default:
*pt_regs_access(task_pt_regs(task), offset) = value;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 0ec7ced727fe..a515e2d230b7 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -654,7 +654,7 @@ static void native_machine_emergency_restart(void)
case BOOT_CF9_FORCE:
port_cf9_safe = true;
- /* Fall through */
+ fallthrough;
case BOOT_CF9_SAFE:
if (port_cf9_safe) {
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index d5fa494c2304..be0d7d4152ec 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -726,7 +726,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->ax = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->ax = regs->orig_ax;
regs->ip -= 2;
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 15e5aad8ac2c..3fdaa042823d 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -735,7 +735,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
* OPCODE1() of the "short" jmp which checks the same condition.
*/
opc1 = OPCODE2(insn) - 0x10;
- /* fall through */
+ fallthrough;
default:
if (!is_cond_jmp_opcode(opc1))
return -ENOSYS;
@@ -892,7 +892,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
fix_ip_or_call = 0;
break;
}
- /* fall through */
+ fallthrough;
default:
riprel_analyze(auprobe, &insn);
}
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d0e2825ae617..5299ef5ff18d 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3016,7 +3016,7 @@ static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
case 0xa4: /* movsb */
case 0xa5: /* movsd/w */
*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
- /* fall through */
+ fallthrough;
case 0xaa: /* stosb */
case 0xab: /* stosd/w */
*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 814d3aee5cef..1d330564eed8 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1779,7 +1779,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
if (ret != HV_STATUS_INVALID_PORT_ID)
break;
- /* fall through - maybe userspace knows this conn_id. */
+ fallthrough; /* maybe userspace knows this conn_id */
case HVCALL_POST_MESSAGE:
/* don't bother userspace if it has no way to handle it */
if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index c47d2acec529..4aa1c2e00e2a 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -285,7 +285,7 @@ int kvm_set_routing_entry(struct kvm *kvm,
switch (ue->u.irqchip.irqchip) {
case KVM_IRQCHIP_PIC_SLAVE:
e->irqchip.pin += PIC_NUM_PINS / 2;
- /* fall through */
+ fallthrough;
case KVM_IRQCHIP_PIC_MASTER:
if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2)
return -EINVAL;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5ccbee7165a2..35cca2e0c802 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1053,7 +1053,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
switch (delivery_mode) {
case APIC_DM_LOWEST:
vcpu->arch.apic_arb_prio++;
- /* fall through */
+ fallthrough;
case APIC_DM_FIXED:
if (unlikely(trig_mode && !level))
break;
@@ -1341,7 +1341,7 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
break;
case APIC_TASKPRI:
report_tpr_access(apic, false);
- /* fall thru */
+ fallthrough;
default:
val = kvm_lapic_get_reg(apic, offset);
break;
@@ -2027,7 +2027,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
case APIC_LVT0:
apic_manage_nmi_watchdog(apic, val);
- /* fall through */
+ fallthrough;
case APIC_LVTTHMR:
case APIC_LVTPC:
case APIC_LVT1:
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a5d0207e7189..43fdb0c12a5d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4422,7 +4422,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[1][4] =
rsvd_check->rsvd_bits_mask[0][4];
- /* fall through */
+ fallthrough;
case PT64_ROOT_4LEVEL:
rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 03dd7bac8034..0194336b64a4 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2668,7 +2668,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_IA32_APICBASE:
if (kvm_vcpu_apicv_active(vcpu))
avic_update_vapic_bar(to_svm(vcpu), data);
- /* Fall through */
+ fallthrough;
default:
return kvm_set_msr_common(vcpu, msr);
}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 46ba2e03a892..819c185adf09 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4654,7 +4654,7 @@ static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
return false;
- /* fall through */
+ fallthrough;
case DB_VECTOR:
return !(vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP));
@@ -4827,7 +4827,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
}
kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
- /* fall through */
+ fallthrough;
case BP_VECTOR:
/*
* Update instruction length as we may reinject #BP from
@@ -5257,7 +5257,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
error_code =
vmcs_read32(IDT_VECTORING_ERROR_CODE);
}
- /* fall through */
+ fallthrough;
case INTR_TYPE_SOFT_EXCEPTION:
kvm_clear_exception_queue(vcpu);
break;
@@ -5610,7 +5610,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
* keeping track of global entries in shadow page tables.
*/
- /* fall-through */
+ fallthrough;
case INVPCID_TYPE_ALL_INCL_GLOBAL:
kvm_mmu_unload(vcpu);
return kvm_skip_emulated_instruction(vcpu);
@@ -6578,7 +6578,7 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
break;
case INTR_TYPE_SOFT_EXCEPTION:
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
- /* fall through */
+ fallthrough;
case INTR_TYPE_HARD_EXCEPTION:
if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
u32 err = vmcs_read32(error_code_field);
@@ -6588,7 +6588,7 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
break;
case INTR_TYPE_SOFT_INTR:
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
- /* fall through */
+ fallthrough;
case INTR_TYPE_EXT_INTR:
kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
break;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 33945283fe07..d39d6cf1d473 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1116,14 +1116,12 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
vcpu->arch.eff_db[dr] = val;
break;
case 4:
- /* fall through */
case 6:
if (!kvm_dr6_valid(val))
return -1; /* #GP */
vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
break;
case 5:
- /* fall through */
default: /* 7 */
if (!kvm_dr7_valid(val))
return -1; /* #GP */
@@ -1154,12 +1152,10 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
*val = vcpu->arch.db[array_index_nospec(dr, size)];
break;
case 4:
- /* fall through */
case 6:
*val = vcpu->arch.dr6;
break;
case 5:
- /* fall through */
default: /* 7 */
*val = vcpu->arch.dr7;
break;
@@ -3051,7 +3047,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
- pr = true; /* fall through */
+ pr = true;
+ fallthrough;
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
if (kvm_pmu_is_valid_msr(vcpu, msr))
@@ -4359,7 +4356,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
case KVM_CAP_HYPERV_SYNIC2:
if (cap->args[0])
return -EINVAL;
- /* fall through */
+ fallthrough;
case KVM_CAP_HYPERV_SYNIC:
if (!irqchip_in_kernel(vcpu->kvm))
@@ -8672,7 +8669,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
- /* fall through */
+ fallthrough;
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.apf.halted = false;
break;
diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
index 4f1719e22d3c..b6da09339308 100644
--- a/arch/x86/lib/cmdline.c
+++ b/arch/x86/lib/cmdline.c
@@ -58,7 +58,7 @@ __cmdline_find_option_bool(const char *cmdline, int max_cmdline_size,
state = st_wordcmp;
opptr = option;
wstart = pos;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if (!*opptr) {
@@ -89,7 +89,7 @@ __cmdline_find_option_bool(const char *cmdline, int max_cmdline_size,
break;
}
state = st_wordskip;
- /* fall through */
+ fallthrough;
case st_wordskip:
if (!c)
@@ -151,7 +151,7 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size,
state = st_wordcmp;
opptr = option;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if ((c == '=') && !*opptr) {
@@ -172,7 +172,7 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size,
break;
}
state = st_wordskip;
- /* fall through */
+ fallthrough;
case st_wordskip:
if (myisspace(c))
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 31600d851fd8..5e69603ff63f 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -179,7 +179,7 @@ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off)
if (insn->addr_bytes == 2)
return -EINVAL;
- /* fall through */
+ fallthrough;
case -EDOM:
case offsetof(struct pt_regs, bx):
@@ -362,7 +362,6 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
case INAT_SEG_REG_GS:
return vm86regs->gs;
case INAT_SEG_REG_IGNORE:
- /* fall through */
default:
return -EINVAL;
}
@@ -386,7 +385,6 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
*/
return get_user_gs(regs);
case INAT_SEG_REG_IGNORE:
- /* fall through */
default:
return -EINVAL;
}
@@ -786,7 +784,7 @@ int insn_get_code_seg_params(struct pt_regs *regs)
*/
return INSN_CODE_SEG_PARAMS(4, 8);
case 3: /* Invalid setting. CS.L=1, CS.D=1 */
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
diff --git a/arch/x86/math-emu/errors.c b/arch/x86/math-emu/errors.c
index 73dc66d887f3..ec071cbb0804 100644
--- a/arch/x86/math-emu/errors.c
+++ b/arch/x86/math-emu/errors.c
@@ -186,7 +186,7 @@ void FPU_printall(void)
case TAG_Special:
/* Update tagi for the printk below */
tagi = FPU_Special(r);
- /* fall through */
+ fallthrough;
case TAG_Valid:
printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
getsign(r) ? '-' : '+',
diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c
index 127ea54122d7..4a9887851ad8 100644
--- a/arch/x86/math-emu/fpu_trig.c
+++ b/arch/x86/math-emu/fpu_trig.c
@@ -1352,7 +1352,7 @@ static void fyl2xp1(FPU_REG *st0_ptr, u_char st0_tag)
case TW_Denormal:
if (denormal_operand() < 0)
return;
- /* fall through */
+ fallthrough;
case TAG_Zero:
case TAG_Valid:
setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr));
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 84d85dbd1dad..9e5ccc56f8e0 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -574,7 +574,7 @@ static bool memremap_should_map_decrypted(resource_size_t phys_addr,
/* For SEV, these areas are encrypted */
if (sev_active())
break;
- /* Fallthrough */
+ fallthrough;
case E820_TYPE_PRAM:
return true;
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 76cee341507b..b3b17d6c50f0 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -448,7 +448,7 @@ static void do_signal(struct pt_regs *regs)
regs->areg[2] = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->areg[2] = regs->syscall;
regs->pc -= 3;