author	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-15 03:17:34 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-15 03:17:34 +0300
commit	f2772a0e4833d1af1901b6f1a38136fb71d1350c (patch)
tree	28bf6f0944d745e19947273022224ebc6b2d07c8 /arch/um/kernel
parent	fcd98147ac71f35b69e2f50b5fddc5524dd2dfa8 (diff)
parent	b482e48d29f1461fd0d059a17f32bcfa274127b3 (diff)
download	linux-f2772a0e4833d1af1901b6f1a38136fb71d1350c.tar.xz
Merge tag 'for-linus-5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml
Pull UML updates from Richard Weinberger:

 - A new timer mode, time travel, for testing with UML

 - Many bugfixes/improvements for the serial line driver

 - Various bugfixes

* tag 'for-linus-5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml:
  um: fix build without CONFIG_UML_TIME_TRAVEL_SUPPORT
  um: Fix kcov crash during startup
  um: configs: Remove useless UEVENT_HELPER_PATH
  um: Support time travel mode
  um: Pass nsecs to os timer functions
  um: Remove drivers/ssl.h
  um: Don't garbage collect in deactivate_all_fds()
  um: Silence lockdep complaint about mmap_sem
  um: Remove locking in deactivate_all_fds()
  um: Timer code cleanup
  um: fix os_timer_one_shot()
  um: Fix IRQ controller regression on console read
Diffstat (limited to 'arch/um/kernel')
-rw-r--r--	arch/um/kernel/irq.c	9
-rw-r--r--	arch/um/kernel/process.c	42
-rw-r--r--	arch/um/kernel/skas/Makefile	2
-rw-r--r--	arch/um/kernel/skas/syscall.c	11
-rw-r--r--	arch/um/kernel/time.c	131
5 files changed, 183 insertions(+), 12 deletions(-)
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 598d7b3d9355..efde1f16c603 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -21,6 +21,8 @@
#include <irq_user.h>
+extern void free_irqs(void);
+
/* When epoll triggers we do not know why it did so
* we can also have different IRQs for read and write.
* This is why we keep a small irq_fd array for each fd -
@@ -100,6 +102,8 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
}
}
}
+
+ free_irqs();
}
static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
@@ -380,10 +384,8 @@ EXPORT_SYMBOL(deactivate_fd);
*/
int deactivate_all_fds(void)
{
- unsigned long flags;
struct irq_entry *to_free;
- spin_lock_irqsave(&irq_lock, flags);
/* Stop IO. The IRQ loop has no lock so this is our
* only way of making sure we are safe to dispose
* of all IRQ handlers
@@ -399,8 +401,7 @@ int deactivate_all_fds(void)
);
to_free = to_free->next;
}
- garbage_collect_irq_entries();
- spin_unlock_irqrestore(&irq_lock, flags);
+ /* don't garbage collect - we can no longer call kfree() here */
os_close_epoll_fd();
return 0;
}
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 691b83b10649..67c0d1a860e9 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -203,10 +203,50 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
kmalloc_ok = save_kmalloc_ok;
}
+static void time_travel_sleep(unsigned long long duration)
+{
+ unsigned long long next = time_travel_time + duration;
+
+ if (time_travel_mode != TT_MODE_INFCPU)
+ os_timer_disable();
+
+ if (time_travel_timer_mode != TT_TMR_DISABLED ||
+ time_travel_timer_expiry < next) {
+ if (time_travel_timer_mode == TT_TMR_ONESHOT)
+ time_travel_set_timer(TT_TMR_DISABLED, 0);
+ /*
+ * time_travel_time will be adjusted in the timer
+ * IRQ handler so it works even when the signal
+ * comes from the OS timer
+ */
+ deliver_alarm();
+ } else {
+ time_travel_set_time(next);
+ }
+
+ if (time_travel_mode != TT_MODE_INFCPU) {
+ if (time_travel_timer_mode == TT_TMR_PERIODIC)
+ os_timer_set_interval(time_travel_timer_interval);
+ else if (time_travel_timer_mode == TT_TMR_ONESHOT)
+ os_timer_one_shot(time_travel_timer_expiry - next);
+ }
+}
+
+static void um_idle_sleep(void)
+{
+ unsigned long long duration = UM_NSEC_PER_SEC;
+
+ if (time_travel_mode != TT_MODE_OFF) {
+ time_travel_sleep(duration);
+ } else {
+ os_idle_sleep(duration);
+ }
+}
+
void arch_cpu_idle(void)
{
cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
- os_idle_sleep(UM_NSEC_PER_SEC);
+ um_idle_sleep();
local_irq_enable();
}
diff --git a/arch/um/kernel/skas/Makefile b/arch/um/kernel/skas/Makefile
index 0b76d8869c94..5bd3edfcfedf 100644
--- a/arch/um/kernel/skas/Makefile
+++ b/arch/um/kernel/skas/Makefile
@@ -12,4 +12,6 @@ obj-y := clone.o mmu.o process.o syscall.o uaccess.o
CFLAGS_clone.o := $(CFLAGS_NO_HARDENING)
UNPROFILE_OBJS := clone.o
+KCOV_INSTRUMENT := n
+
include arch/um/scripts/Makefile.rules
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index b783ac87d98a..44bb10785075 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -10,12 +10,23 @@
#include <sysdep/ptrace.h>
#include <sysdep/ptrace_user.h>
#include <sysdep/syscalls.h>
+#include <shared/timer-internal.h>
void handle_syscall(struct uml_pt_regs *r)
{
struct pt_regs *regs = container_of(r, struct pt_regs, regs);
int syscall;
+ /*
+ * If we have infinite CPU resources, then make every syscall also a
+ * preemption point, since we don't have any other preemption in this
+ * case, and kernel threads would basically never run until userspace
+ * went to sleep, even if said userspace interacts with the kernel in
+ * various ways.
+ */
+ if (time_travel_mode == TT_MODE_INFCPU)
+ schedule();
+
/* Initialize the syscall number and default return value. */
UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
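
[Editor's note] The TT_MODE_INFCPU check above is compiled unconditionally. Per "um: fix build without CONFIG_UML_TIME_TRAVEL_SUPPORT" in the shortlog, timer-internal.h presumably provides constant fallbacks when the option is disabled so that these branches fold away at compile time; a sketch of that assumed arrangement:

#ifndef CONFIG_UML_TIME_TRAVEL_SUPPORT
/* assumed fallbacks: with the mode pinned to TT_MODE_OFF, every
 * "time_travel_mode == TT_MODE_INFCPU" / "!= TT_MODE_OFF" test becomes a
 * compile-time constant, the dead branches are discarded, and the call
 * sites need no #ifdefs of their own */
#define time_travel_mode		TT_MODE_OFF
#define time_travel_time		0
#define time_travel_timer_mode		TT_TMR_DISABLED
#define time_travel_timer_expiry	0

static inline void time_travel_set_time(unsigned long long ns)
{
}

static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
					 unsigned long long expiry)
{
}
#endif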
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 0c572a48158e..6a051b078359 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -19,11 +19,29 @@
#include <kern_util.h>
#include <os.h>
#include <timer-internal.h>
+#include <shared/init.h>
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+enum time_travel_mode time_travel_mode;
+unsigned long long time_travel_time;
+enum time_travel_timer_mode time_travel_timer_mode;
+unsigned long long time_travel_timer_expiry;
+unsigned long long time_travel_timer_interval;
+
+static bool time_travel_start_set;
+static unsigned long long time_travel_start;
+#else
+#define time_travel_start_set 0
+#define time_travel_start 0
+#endif
void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
unsigned long flags;
+ if (time_travel_mode != TT_MODE_OFF)
+ time_travel_set_time(time_travel_timer_expiry);
+
local_irq_save(flags);
do_IRQ(TIMER_IRQ, regs);
local_irq_restore(flags);
@@ -31,26 +49,47 @@ void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
static int itimer_shutdown(struct clock_event_device *evt)
{
- os_timer_disable();
+ if (time_travel_mode != TT_MODE_OFF)
+ time_travel_set_timer(TT_TMR_DISABLED, 0);
+
+ if (time_travel_mode != TT_MODE_INFCPU)
+ os_timer_disable();
+
return 0;
}
static int itimer_set_periodic(struct clock_event_device *evt)
{
- os_timer_set_interval(NULL, NULL);
+ unsigned long long interval = NSEC_PER_SEC / HZ;
+
+ if (time_travel_mode != TT_MODE_OFF)
+ time_travel_set_timer(TT_TMR_PERIODIC,
+ time_travel_time + interval);
+
+ if (time_travel_mode != TT_MODE_INFCPU)
+ os_timer_set_interval(interval);
+
return 0;
}
static int itimer_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- return os_timer_one_shot(delta);
+ delta += 1;
+
+ if (time_travel_mode != TT_MODE_OFF)
+ time_travel_set_timer(TT_TMR_ONESHOT,
+ time_travel_time + delta);
+
+ if (time_travel_mode != TT_MODE_INFCPU)
+ return os_timer_one_shot(delta);
+
+ return 0;
}
static int itimer_one_shot(struct clock_event_device *evt)
{
- os_timer_one_shot(1);
- return 0;
+ return itimer_next_event(0, evt);
}
static struct clock_event_device timer_clockevent = {
@@ -87,6 +126,17 @@ static irqreturn_t um_timer(int irq, void *dev)
static u64 timer_read(struct clocksource *cs)
{
+ if (time_travel_mode != TT_MODE_OFF) {
+ /*
+ * We make reading the timer cost a bit so that we don't get
+ * stuck in loops that expect time to move more than the
+ * exact requested sleep amount, e.g. python's socket server,
+ * see https://bugs.python.org/issue37026.
+ */
+ time_travel_set_time(time_travel_time + TIMER_MULTIPLIER);
+ return time_travel_time / TIMER_MULTIPLIER;
+ }
+
return os_nsecs() / TIMER_MULTIPLIER;
}
@@ -107,7 +157,7 @@ static void __init um_timer_setup(void)
printk(KERN_ERR "register_timer : request_irq failed - "
"errno = %d\n", -err);
- err = os_timer_create(NULL);
+ err = os_timer_create();
if (err != 0) {
printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
return;
@@ -123,7 +173,12 @@ static void __init um_timer_setup(void)
void read_persistent_clock64(struct timespec64 *ts)
{
- long long nsecs = os_persistent_clock_emulation();
+ long long nsecs;
+
+ if (time_travel_start_set)
+ nsecs = time_travel_start + time_travel_time;
+ else
+ nsecs = os_persistent_clock_emulation();
set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
nsecs % NSEC_PER_SEC);
@@ -134,3 +189,65 @@ void __init time_init(void)
timer_set_signal_handler();
late_time_init = um_timer_setup;
}
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+unsigned long calibrate_delay_is_known(void)
+{
+ if (time_travel_mode == TT_MODE_INFCPU)
+ return 1;
+ return 0;
+}
+
+int setup_time_travel(char *str)
+{
+ if (strcmp(str, "=inf-cpu") == 0) {
+ time_travel_mode = TT_MODE_INFCPU;
+ timer_clockevent.name = "time-travel-timer-infcpu";
+ timer_clocksource.name = "time-travel-clock";
+ return 1;
+ }
+
+ if (!*str) {
+ time_travel_mode = TT_MODE_BASIC;
+ timer_clockevent.name = "time-travel-timer";
+ timer_clocksource.name = "time-travel-clock";
+ return 1;
+ }
+
+ return -EINVAL;
+}
+
+__setup("time-travel", setup_time_travel);
+__uml_help(setup_time_travel,
+"time-travel\n"
+"This option just enables basic time travel mode, in which the clock/timers\n"
+"inside the UML instance skip forward when there's nothing to do, rather than\n"
+"waiting for real time to elapse. However, instance CPU speed is limited by\n"
+"the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
+"clock (but quicker when there's nothing to do).\n"
+"\n"
+"time-travel=inf-cpu\n"
+"This enables time travel mode with infinite processing power, in which there\n"
+"are no wall clock timers, and any CPU processing happens - as seen from the\n"
+"guest - instantly. This can be useful for accurate simulation regardless of\n"
+"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
+"easily lead to getting stuck (e.g. if anything in the system busy loops).\n");
+
+int setup_time_travel_start(char *str)
+{
+ int err;
+
+ err = kstrtoull(str, 0, &time_travel_start);
+ if (err)
+ return err;
+
+ time_travel_start_set = 1;
+ return 1;
+}
+
+__setup("time-travel-start", setup_time_travel_start);
+__uml_help(setup_time_travel_start,
+"time-travel-start=<seconds>\n"
+"Configure the UML instance's wall clock to start at this value rather than\n"
+"the host's wall clock at the time of UML boot.\n");
+#endif
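
[Editor's note] Both new parameters are ordinary UML kernel command-line options, so they combine with the usual UML arguments. A purely illustrative invocation (binary name, memory size and block-device arguments are placeholders, not part of this patch):

  ./linux mem=64M ubd0=rootfs.img root=/dev/ubda time-travel=inf-cpu

Adding time-travel-start=<value> on top of that pins the instance's initial wall clock as described in the help text above.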