0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
0004  * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
0005  * Copyright (C) 2012-2014 Cisco Systems
0006  * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
0007  * Copyright (C) 2019 Intel Corporation
0008  */
0009 
0010 #include <linux/clockchips.h>
0011 #include <linux/init.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/jiffies.h>
0014 #include <linux/mm.h>
0015 #include <linux/sched.h>
0016 #include <linux/spinlock.h>
0017 #include <linux/threads.h>
0018 #include <asm/irq.h>
0019 #include <asm/param.h>
0020 #include <kern_util.h>
0021 #include <os.h>
0022 #include <linux/time-internal.h>
0023 #include <linux/um_timetravel.h>
0024 #include <shared/init.h>
0025 
0026 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
0027 enum time_travel_mode time_travel_mode;
0028 EXPORT_SYMBOL_GPL(time_travel_mode);
0029 
0030 static bool time_travel_start_set;
0031 static unsigned long long time_travel_start;
0032 static unsigned long long time_travel_time;
0033 static LIST_HEAD(time_travel_events);
0034 static LIST_HEAD(time_travel_irqs);
0035 static unsigned long long time_travel_timer_interval;
0036 static unsigned long long time_travel_next_event;
0037 static struct time_travel_event time_travel_timer_event;
0038 static int time_travel_ext_fd = -1;
0039 static unsigned int time_travel_ext_waiting;
0040 static bool time_travel_ext_prev_request_valid;
0041 static unsigned long long time_travel_ext_prev_request;
0042 static bool time_travel_ext_free_until_valid;
0043 static unsigned long long time_travel_ext_free_until;
0044 
0045 static void time_travel_set_time(unsigned long long ns)
0046 {
0047     if (unlikely(ns < time_travel_time))
0048         panic("time-travel: time goes backwards %lld -> %lld\n",
0049               time_travel_time, ns);
0050     else if (unlikely(ns >= S64_MAX))
0051         panic("The system was going to sleep forever, aborting");
0052 
0053     time_travel_time = ns;
0054 }
0055 
0056 enum time_travel_message_handling {
0057     TTMH_IDLE,
0058     TTMH_POLL,
0059     TTMH_READ,
0060 };
0061 
0062 static void time_travel_handle_message(struct um_timetravel_msg *msg,
0063                        enum time_travel_message_handling mode)
0064 {
0065     struct um_timetravel_msg resp = {
0066         .op = UM_TIMETRAVEL_ACK,
0067     };
0068     int ret;
0069 
0070     /*
0071      * We can't unlock here, but interrupt signals with a timetravel_handler
0072      * (see um_request_irq_tt) get to the timetravel_handler anyway.
0073      */
0074     if (mode != TTMH_READ) {
0075         BUG_ON(mode == TTMH_IDLE && !irqs_disabled());
0076 
0077         while (os_poll(1, &time_travel_ext_fd) != 0) {
0078             /* nothing */
0079         }
0080     }
0081 
0082     ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));
0083 
0084     if (ret == 0)
0085         panic("time-travel external link is broken\n");
0086     if (ret != sizeof(*msg))
0087         panic("invalid time-travel message - %d bytes\n", ret);
0088 
0089     switch (msg->op) {
0090     default:
0091         WARN_ONCE(1, "time-travel: unexpected message %lld\n",
0092               (unsigned long long)msg->op);
0093         break;
0094     case UM_TIMETRAVEL_ACK:
0095         return;
0096     case UM_TIMETRAVEL_RUN:
0097         time_travel_set_time(msg->time);
0098         break;
0099     case UM_TIMETRAVEL_FREE_UNTIL:
0100         time_travel_ext_free_until_valid = true;
0101         time_travel_ext_free_until = msg->time;
0102         break;
0103     }
0104 
0105     resp.seq = msg->seq;
0106     os_write_file(time_travel_ext_fd, &resp, sizeof(resp));
0107 }
0108 
0109 static u64 time_travel_ext_req(u32 op, u64 time)
0110 {
0111     static int seq;
0112     int mseq = ++seq;
0113     struct um_timetravel_msg msg = {
0114         .op = op,
0115         .time = time,
0116         .seq = mseq,
0117     };
0118 
0119     /*
0120      * We need to block even the timetravel handlers of SIGIO here and
0121      * only restore their use when we got the ACK - otherwise we may
0122      * (will) get interrupted by that, try to queue the IRQ for future
0123      * processing and thus send another request while we're still waiting
0124      * for an ACK, but the peer doesn't know we got interrupted and will
0125      * send the ACKs in the same order as the messages, but we'd need to
0126      * see them in the opposite order ...
0127      *
0128      * This wouldn't matter *too* much, but some ACKs carry the
0129      * current time (for UM_TIMETRAVEL_GET) and getting another
0130      * ACK without a time would confuse us a lot!
0131      *
0132      * The sequence number assignment that happens here lets us
0133      * debug such message handling issues more easily.
0134      */
0135     block_signals_hard();
0136     os_write_file(time_travel_ext_fd, &msg, sizeof(msg));
0137 
0138     while (msg.op != UM_TIMETRAVEL_ACK)
0139         time_travel_handle_message(&msg, TTMH_READ);
0140 
0141     if (msg.seq != mseq)
0142         panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
0143               msg.op, msg.seq, mseq, msg.time);
0144 
0145     if (op == UM_TIMETRAVEL_GET)
0146         time_travel_set_time(msg.time);
0147     unblock_signals_hard();
0148 
0149     return msg.time;
0150 }
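/*
 * For reference, a minimal sketch of the peer side of the exchange above: a
 * user-space controller that reads one request from the shared socket and
 * acknowledges it with the same sequence number, which is what
 * time_travel_ext_req() waits for.  This is only an illustration (the fd
 * setup and any real scheduling decisions are omitted) and is not part of
 * this file's build.
 */
#if 0   /* illustrative user-space sketch, not built here */
#include <unistd.h>
#include <linux/um_timetravel.h>

static void ack_one_request(int fd)
{
    struct um_timetravel_msg req, ack = { .op = UM_TIMETRAVEL_ACK };

    if (read(fd, &req, sizeof(req)) != sizeof(req))
        return;

    ack.seq = req.seq;  /* echo the request's sequence number */
    /* for UM_TIMETRAVEL_GET, the ACK would also carry the current time */
    write(fd, &ack, sizeof(ack));
}
#endif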
0151 
0152 void __time_travel_wait_readable(int fd)
0153 {
0154     int fds[2] = { fd, time_travel_ext_fd };
0155     int ret;
0156 
0157     if (time_travel_mode != TT_MODE_EXTERNAL)
0158         return;
0159 
0160     while ((ret = os_poll(2, fds))) {
0161         struct um_timetravel_msg msg;
0162 
0163         if (ret == 1)
0164             time_travel_handle_message(&msg, TTMH_READ);
0165     }
0166 }
0167 EXPORT_SYMBOL_GPL(__time_travel_wait_readable);
0168 
0169 static void time_travel_ext_update_request(unsigned long long time)
0170 {
0171     if (time_travel_mode != TT_MODE_EXTERNAL)
0172         return;
0173 
0174     /* asked for exactly this time previously */
0175     if (time_travel_ext_prev_request_valid &&
0176         time == time_travel_ext_prev_request)
0177         return;
0178 
0179     /*
0180      * if we're running and are allowed to run past the request
0181      * then we don't need to update it either
0182      */
0183     if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
0184         time < time_travel_ext_free_until)
0185         return;
0186 
0187     time_travel_ext_prev_request = time;
0188     time_travel_ext_prev_request_valid = true;
0189     time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
0190 }
0191 
0192 void __time_travel_propagate_time(void)
0193 {
0194     static unsigned long long last_propagated;
0195 
0196     if (last_propagated == time_travel_time)
0197         return;
0198 
0199     time_travel_ext_req(UM_TIMETRAVEL_UPDATE, time_travel_time);
0200     last_propagated = time_travel_time;
0201 }
0202 EXPORT_SYMBOL_GPL(__time_travel_propagate_time);
0203 
0204 /* returns true if we must wait on the simtime device */
0205 static bool time_travel_ext_request(unsigned long long time)
0206 {
0207     /*
0208      * If we received an external sync point ("free until") then we
0209      * don't have to request/wait for anything until then, unless
0210      * we're already waiting.
0211      */
0212     if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
0213         time < time_travel_ext_free_until)
0214         return false;
0215 
0216     time_travel_ext_update_request(time);
0217     return true;
0218 }
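/*
 * Worked example of the "free until" rule above, with made-up numbers: if the
 * controller previously granted free_until = 5,000,000 ns and we are running
 * (not waiting), a request for time 3,000,000 needs no message at all, while
 * a request for 6,000,000 lies outside the grant, so it is sent to the
 * controller and we must then wait to be scheduled again.
 */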
0219 
0220 static void time_travel_ext_wait(bool idle)
0221 {
0222     struct um_timetravel_msg msg = {
0223         .op = UM_TIMETRAVEL_ACK,
0224     };
0225 
0226     time_travel_ext_prev_request_valid = false;
0227     time_travel_ext_free_until_valid = false;
0228     time_travel_ext_waiting++;
0229 
0230     time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);
0231 
0232     /*
0233      * Here we are deep in the idle loop, so we have to break out of the
0234      * kernel abstraction in a sense and implement this in terms of the
0235      * UML system waiting on the VQ interrupt while sleeping; when we get
0236      * the signal, it'll call time_travel_ext_vq_notify_done(), completing
0237      * the call.
0238      */
0239     while (msg.op != UM_TIMETRAVEL_RUN)
0240         time_travel_handle_message(&msg, idle ? TTMH_IDLE : TTMH_POLL);
0241 
0242     time_travel_ext_waiting--;
0243 
0244     /* we might request more stuff while polling - reset when we run */
0245     time_travel_ext_prev_request_valid = false;
0246 }
0247 
0248 static void time_travel_ext_get_time(void)
0249 {
0250     time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
0251 }
0252 
0253 static void __time_travel_update_time(unsigned long long ns, bool idle)
0254 {
0255     if (time_travel_mode == TT_MODE_EXTERNAL && time_travel_ext_request(ns))
0256         time_travel_ext_wait(idle);
0257     else
0258         time_travel_set_time(ns);
0259 }
0260 
0261 static struct time_travel_event *time_travel_first_event(void)
0262 {
0263     return list_first_entry_or_null(&time_travel_events,
0264                     struct time_travel_event,
0265                     list);
0266 }
0267 
0268 static void __time_travel_add_event(struct time_travel_event *e,
0269                     unsigned long long time)
0270 {
0271     struct time_travel_event *tmp;
0272     bool inserted = false;
0273     unsigned long flags;
0274 
0275     if (e->pending)
0276         return;
0277 
0278     e->pending = true;
0279     e->time = time;
0280 
0281     local_irq_save(flags);
0282     list_for_each_entry(tmp, &time_travel_events, list) {
0283         /*
0284          * Add the new entry before one with higher time,
0285          * or if they're equal and both on stack, because
0286          * in that case we need to unwind the stack in the
0287          * right order, and the later event (timer sleep
0288          * or such) must be dequeued first.
0289          */
0290         if ((tmp->time > e->time) ||
0291             (tmp->time == e->time && tmp->onstack && e->onstack)) {
0292             list_add_tail(&e->list, &tmp->list);
0293             inserted = true;
0294             break;
0295         }
0296     }
0297 
0298     if (!inserted)
0299         list_add_tail(&e->list, &time_travel_events);
0300 
0301     tmp = time_travel_first_event();
0302     time_travel_ext_update_request(tmp->time);
0303     time_travel_next_event = tmp->time;
0304     local_irq_restore(flags);
0305 }
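/*
 * The ordering rule above, restated as a tiny stand-alone sketch (hypothetical
 * types, illustration only): a new event is inserted in front of the first
 * queued event with a later time, so events with equal times keep FIFO order,
 * except that two on-stack events with equal times are kept in LIFO order so
 * that the stack can unwind in the right order.
 */
#if 0   /* illustrative sketch, not built here */
#include <stdbool.h>

struct demo_event {
    unsigned long long time;
    bool onstack;
    struct demo_event *next;
};

static void demo_insert(struct demo_event **head, struct demo_event *e)
{
    struct demo_event **pos = head;

    /* walk past events that should still run before the new one */
    while (*pos && !((*pos)->time > e->time ||
             ((*pos)->time == e->time && (*pos)->onstack && e->onstack)))
        pos = &(*pos)->next;

    e->next = *pos;     /* insert before the first later event */
    *pos = e;
}
#endif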
0306 
0307 static void time_travel_add_event(struct time_travel_event *e,
0308                   unsigned long long time)
0309 {
0310     if (WARN_ON(!e->fn))
0311         return;
0312 
0313     __time_travel_add_event(e, time);
0314 }
0315 
0316 void time_travel_add_event_rel(struct time_travel_event *e,
0317                    unsigned long long delay_ns)
0318 {
0319     time_travel_add_event(e, time_travel_time + delay_ns);
0320 }
0321 
0322 void time_travel_periodic_timer(struct time_travel_event *e)
0323 {
0324     time_travel_add_event(&time_travel_timer_event,
0325                   time_travel_time + time_travel_timer_interval);
0326     deliver_alarm();
0327 }
0328 
0329 void deliver_time_travel_irqs(void)
0330 {
0331     struct time_travel_event *e;
0332     unsigned long flags;
0333 
0334     /*
0335      * Don't do anything for most cases. Note that because here we have
0336      * to disable IRQs (and re-enable later) we'll actually recurse at
0337      * the end of the function, so this is strictly necessary.
0338      */
0339     if (likely(list_empty(&time_travel_irqs)))
0340         return;
0341 
0342     local_irq_save(flags);
0343     irq_enter();
0344     while ((e = list_first_entry_or_null(&time_travel_irqs,
0345                          struct time_travel_event,
0346                          list))) {
0347         list_del(&e->list);
0348         e->pending = false;
0349         e->fn(e);
0350     }
0351     irq_exit();
0352     local_irq_restore(flags);
0353 }
0354 
0355 static void time_travel_deliver_event(struct time_travel_event *e)
0356 {
0357     if (e == &time_travel_timer_event) {
0358         /*
0359          * deliver_alarm() does the irq_enter/irq_exit
0360          * by itself, so must handle it specially here
0361          */
0362         e->fn(e);
0363     } else if (irqs_disabled()) {
0364         list_add_tail(&e->list, &time_travel_irqs);
0365         /*
0366          * set pending again, it was set to false when the
0367          * event was deleted from the original list, but
0368          * now it's still pending until we deliver the IRQ.
0369          */
0370         e->pending = true;
0371     } else {
0372         unsigned long flags;
0373 
0374         local_irq_save(flags);
0375         irq_enter();
0376         e->fn(e);
0377         irq_exit();
0378         local_irq_restore(flags);
0379     }
0380 }
0381 
0382 bool time_travel_del_event(struct time_travel_event *e)
0383 {
0384     unsigned long flags;
0385 
0386     if (!e->pending)
0387         return false;
0388     local_irq_save(flags);
0389     list_del(&e->list);
0390     e->pending = false;
0391     local_irq_restore(flags);
0392     return true;
0393 }
0394 
0395 static void time_travel_update_time(unsigned long long next, bool idle)
0396 {
0397     struct time_travel_event ne = {
0398         .onstack = true,
0399     };
0400     struct time_travel_event *e;
0401     bool finished = idle;
0402 
0403     /* add it without a handler - we deal with that specifically below */
0404     __time_travel_add_event(&ne, next);
0405 
0406     do {
0407         e = time_travel_first_event();
0408 
0409         BUG_ON(!e);
0410         __time_travel_update_time(e->time, idle);
0411 
0412         /* new events may have been inserted while we were waiting */
0413         if (e == time_travel_first_event()) {
0414             BUG_ON(!time_travel_del_event(e));
0415             BUG_ON(time_travel_time != e->time);
0416 
0417             if (e == &ne) {
0418                 finished = true;
0419             } else {
0420                 if (e->onstack)
0421                     panic("On-stack event dequeued outside of the stack! time=%lld, event time=%lld, event=%pS\n",
0422                           time_travel_time, e->time, e);
0423                 time_travel_deliver_event(e);
0424             }
0425         }
0426 
0427         e = time_travel_first_event();
0428         if (e)
0429             time_travel_ext_update_request(e->time);
0430     } while (ne.pending && !finished);
0431 
0432     time_travel_del_event(&ne);
0433 }
0434 
0435 void time_travel_ndelay(unsigned long nsec)
0436 {
0437     time_travel_update_time(time_travel_time + nsec, false);
0438 }
0439 EXPORT_SYMBOL(time_travel_ndelay);
0440 
0441 void time_travel_add_irq_event(struct time_travel_event *e)
0442 {
0443     BUG_ON(time_travel_mode != TT_MODE_EXTERNAL);
0444 
0445     time_travel_ext_get_time();
0446     /*
0447      * We could model interrupt latency here, for now just
0448      * don't have any latency at all and request the exact
0449      * same time (again) to run the interrupt...
0450      */
0451     time_travel_add_event(e, time_travel_time);
0452 }
0453 EXPORT_SYMBOL_GPL(time_travel_add_irq_event);
0454 
0455 static void time_travel_oneshot_timer(struct time_travel_event *e)
0456 {
0457     deliver_alarm();
0458 }
0459 
0460 void time_travel_sleep(void)
0461 {
0462     /*
0463      * Wait "forever" (using S64_MAX because there are some potential
0464      * wrapping issues, especially with the current TT_MODE_EXTERNAL
0465      * controller application).
0466      */
0467     unsigned long long next = S64_MAX;
0468 
0469     if (time_travel_mode == TT_MODE_BASIC)
0470         os_timer_disable();
0471 
0472     time_travel_update_time(next, true);
0473 
0474     if (time_travel_mode == TT_MODE_BASIC &&
0475         time_travel_timer_event.pending) {
0476         if (time_travel_timer_event.fn == time_travel_periodic_timer) {
0477             /*
0478              * This is somewhat wrong - we should get the first
0479              * one sooner like the os_timer_one_shot() below...
0480              */
0481             os_timer_set_interval(time_travel_timer_interval);
0482         } else {
0483             os_timer_one_shot(time_travel_timer_event.time - next);
0484         }
0485     }
0486 }
0487 
0488 static void time_travel_handle_real_alarm(void)
0489 {
0490     time_travel_set_time(time_travel_next_event);
0491 
0492     time_travel_del_event(&time_travel_timer_event);
0493 
0494     if (time_travel_timer_event.fn == time_travel_periodic_timer)
0495         time_travel_add_event(&time_travel_timer_event,
0496                       time_travel_time +
0497                       time_travel_timer_interval);
0498 }
0499 
0500 static void time_travel_set_interval(unsigned long long interval)
0501 {
0502     time_travel_timer_interval = interval;
0503 }
0504 
0505 static int time_travel_connect_external(const char *socket)
0506 {
0507     const char *sep;
0508     unsigned long long id = (unsigned long long)-1;
0509     int rc;
0510 
0511     if ((sep = strchr(socket, ':'))) {
0512         char buf[25] = {};
0513         if (sep - socket > sizeof(buf) - 1)
0514             goto invalid_number;
0515 
0516         memcpy(buf, socket, sep - socket);
0517         if (kstrtoull(buf, 0, &id)) {
0518 invalid_number:
0519             panic("time-travel: invalid external ID in string '%s'\n",
0520                   socket);
0521             return -EINVAL;
0522         }
0523 
0524         socket = sep + 1;
0525     }
0526 
0527     rc = os_connect_socket(socket);
0528     if (rc < 0) {
0529         panic("time-travel: failed to connect to external socket %s\n",
0530               socket);
0531         return rc;
0532     }
0533 
0534     time_travel_ext_fd = rc;
0535 
0536     time_travel_ext_req(UM_TIMETRAVEL_START, id);
0537 
0538     return 1;
0539 }
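/*
 * Worked example of the "[ID:]socket" form handled above, with made-up paths:
 * "42:/tmp/tt.sock" splits at the first ':' into the ID 42 (parsed with base
 * 0, so "0x2a:/tmp/tt.sock" would work too) and the socket path
 * "/tmp/tt.sock"; a plain "/tmp/tt.sock" contains no ':' and keeps the
 * default ID of -1.
 */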
0540 
0541 static void time_travel_set_start(void)
0542 {
0543     if (time_travel_start_set)
0544         return;
0545 
0546     switch (time_travel_mode) {
0547     case TT_MODE_EXTERNAL:
0548         time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
0549         /* controller gave us the *current* time, so adjust by that */
0550         time_travel_ext_get_time();
0551         time_travel_start -= time_travel_time;
0552         break;
0553     case TT_MODE_INFCPU:
0554     case TT_MODE_BASIC:
0555         if (!time_travel_start_set)
0556             time_travel_start = os_persistent_clock_emulation();
0557         break;
0558     case TT_MODE_OFF:
0559         /* we just read the host clock with os_persistent_clock_emulation() */
0560         break;
0561     }
0562 
0563     time_travel_start_set = true;
0564 }
0565 #else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
0566 #define time_travel_start_set 0
0567 #define time_travel_start 0
0568 #define time_travel_time 0
0569 #define time_travel_ext_waiting 0
0570 
0571 static inline void time_travel_update_time(unsigned long long ns, bool retearly)
0572 {
0573 }
0574 
0575 static inline void time_travel_handle_real_alarm(void)
0576 {
0577 }
0578 
0579 static void time_travel_set_interval(unsigned long long interval)
0580 {
0581 }
0582 
0583 static inline void time_travel_set_start(void)
0584 {
0585 }
0586 
0587 /* fail link if this actually gets used */
0588 extern u64 time_travel_ext_req(u32 op, u64 time);
0589 
0590 /* these are empty macros so the struct/fn need not exist */
0591 #define time_travel_add_event(e, time) do { } while (0)
0592 /* externally not usable - redefine here so we can */
0593 #undef time_travel_del_event
0594 #define time_travel_del_event(e) do { } while (0)
0595 #endif
0596 
0597 void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
0598 {
0599     unsigned long flags;
0600 
0601     /*
0602      * In basic time-travel mode we still get real interrupts
0603      * (signals) but since we don't read time from the OS, we
0604      * must update the simulated time here to the expiry when
0605      * we get a signal.
0606      * This is not the case in inf-cpu mode, since there we
0607      * never get any real signals from the OS.
0608      */
0609     if (time_travel_mode == TT_MODE_BASIC)
0610         time_travel_handle_real_alarm();
0611 
0612     local_irq_save(flags);
0613     do_IRQ(TIMER_IRQ, regs);
0614     local_irq_restore(flags);
0615 }
0616 
0617 static int itimer_shutdown(struct clock_event_device *evt)
0618 {
0619     if (time_travel_mode != TT_MODE_OFF)
0620         time_travel_del_event(&time_travel_timer_event);
0621 
0622     if (time_travel_mode != TT_MODE_INFCPU &&
0623         time_travel_mode != TT_MODE_EXTERNAL)
0624         os_timer_disable();
0625 
0626     return 0;
0627 }
0628 
0629 static int itimer_set_periodic(struct clock_event_device *evt)
0630 {
0631     unsigned long long interval = NSEC_PER_SEC / HZ;
0632 
0633     if (time_travel_mode != TT_MODE_OFF) {
0634         time_travel_del_event(&time_travel_timer_event);
0635         time_travel_set_event_fn(&time_travel_timer_event,
0636                      time_travel_periodic_timer);
0637         time_travel_set_interval(interval);
0638         time_travel_add_event(&time_travel_timer_event,
0639                       time_travel_time + interval);
0640     }
0641 
0642     if (time_travel_mode != TT_MODE_INFCPU &&
0643         time_travel_mode != TT_MODE_EXTERNAL)
0644         os_timer_set_interval(interval);
0645 
0646     return 0;
0647 }
0648 
0649 static int itimer_next_event(unsigned long delta,
0650                  struct clock_event_device *evt)
0651 {
0652     delta += 1;
0653 
0654     if (time_travel_mode != TT_MODE_OFF) {
0655         time_travel_del_event(&time_travel_timer_event);
0656         time_travel_set_event_fn(&time_travel_timer_event,
0657                      time_travel_oneshot_timer);
0658         time_travel_add_event(&time_travel_timer_event,
0659                       time_travel_time + delta);
0660     }
0661 
0662     if (time_travel_mode != TT_MODE_INFCPU &&
0663         time_travel_mode != TT_MODE_EXTERNAL)
0664         return os_timer_one_shot(delta);
0665 
0666     return 0;
0667 }
0668 
0669 static int itimer_one_shot(struct clock_event_device *evt)
0670 {
0671     return itimer_next_event(0, evt);
0672 }
0673 
0674 static struct clock_event_device timer_clockevent = {
0675     .name           = "posix-timer",
0676     .rating         = 250,
0677     .cpumask        = cpu_possible_mask,
0678     .features       = CLOCK_EVT_FEAT_PERIODIC |
0679                   CLOCK_EVT_FEAT_ONESHOT,
0680     .set_state_shutdown = itimer_shutdown,
0681     .set_state_periodic = itimer_set_periodic,
0682     .set_state_oneshot  = itimer_one_shot,
0683     .set_next_event     = itimer_next_event,
0684     .shift          = 0,
0685     .max_delta_ns       = 0xffffffff,
0686     .max_delta_ticks    = 0xffffffff,
0687     .min_delta_ns       = TIMER_MIN_DELTA,
0688     .min_delta_ticks    = TIMER_MIN_DELTA, // microsecond resolution should be enough for anyone, same as 640K RAM
0689     .irq            = 0,
0690     .mult           = 1,
0691 };
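/*
 * With .shift = 0 and .mult = 1, the clockevents core's conversion from a
 * nanosecond delta to device "ticks" is (roughly) an identity, which is why
 * itimer_next_event() above can hand its delta straight to
 * os_timer_one_shot().  A sketch of that conversion, under those assumptions:
 */
#if 0   /* illustrative sketch, not built here */
static unsigned long long ns_to_device_ticks(unsigned long long ns,
                                             unsigned int mult,
                                             unsigned int shift)
{
    /* identity for mult == 1, shift == 0 */
    return (ns * mult) >> shift;
}
#endif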
0692 
0693 static irqreturn_t um_timer(int irq, void *dev)
0694 {
0695     if (get_current()->mm != NULL)
0696     {
0697         /* userspace - relay signal, results in correct userspace timers */
0698         os_alarm_process(get_current()->mm->context.id.u.pid);
0699     }
0700 
0701     (*timer_clockevent.event_handler)(&timer_clockevent);
0702 
0703     return IRQ_HANDLED;
0704 }
0705 
0706 static u64 timer_read(struct clocksource *cs)
0707 {
0708     if (time_travel_mode != TT_MODE_OFF) {
0709         /*
0710          * We make reading the timer cost a bit so that we don't get
0711          * stuck in loops that expect time to move more than the
0712          * exact requested sleep amount, e.g. python's socket server,
0713          * see https://bugs.python.org/issue37026.
0714          *
0715          * However, don't do that when we're in interrupt or such as
0716          * then we might recurse into our own processing, and get to
0717          * even more waiting, and that's not good - it messes up the
0718          * "what do I do next" and onstack event we use to know when
0719          * to return from time_travel_update_time().
0720          */
0721         if (!irqs_disabled() && !in_interrupt() && !in_softirq() &&
0722             !time_travel_ext_waiting)
0723             time_travel_update_time(time_travel_time +
0724                         TIMER_MULTIPLIER,
0725                         false);
0726         return time_travel_time / TIMER_MULTIPLIER;
0727     }
0728 
0729     return os_nsecs() / TIMER_MULTIPLIER;
0730 }
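/*
 * Consequence of the comment above, as a small worked example: a busy loop
 * like
 *
 *      t0 = ktime_get_ns();
 *      while (ktime_get_ns() - t0 < wanted_ns)
 *              ;
 *
 * still terminates in time-travel mode because each clocksource read advances
 * simulated time by TIMER_MULTIPLIER ns, rather than spinning forever on a
 * clock that would otherwise only move at explicitly requested points.
 */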
0731 
0732 static struct clocksource timer_clocksource = {
0733     .name       = "timer",
0734     .rating     = 300,
0735     .read       = timer_read,
0736     .mask       = CLOCKSOURCE_MASK(64),
0737     .flags      = CLOCK_SOURCE_IS_CONTINUOUS,
0738 };
0739 
0740 static void __init um_timer_setup(void)
0741 {
0742     int err;
0743 
0744     err = request_irq(TIMER_IRQ, um_timer, IRQF_TIMER, "hr timer", NULL);
0745     if (err != 0)
0746         printk(KERN_ERR "register_timer : request_irq failed - "
0747                "errno = %d\n", -err);
0748 
0749     err = os_timer_create();
0750     if (err != 0) {
0751         printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
0752         return;
0753     }
0754 
0755     err = clocksource_register_hz(&timer_clocksource, NSEC_PER_SEC/TIMER_MULTIPLIER);
0756     if (err) {
0757         printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
0758         return;
0759     }
0760     clockevents_register_device(&timer_clockevent);
0761 }
0762 
0763 void read_persistent_clock64(struct timespec64 *ts)
0764 {
0765     long long nsecs;
0766 
0767     time_travel_set_start();
0768 
0769     if (time_travel_mode != TT_MODE_OFF)
0770         nsecs = time_travel_start + time_travel_time;
0771     else
0772         nsecs = os_persistent_clock_emulation();
0773 
0774     set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
0775                   nsecs % NSEC_PER_SEC);
0776 }
0777 
0778 void __init time_init(void)
0779 {
0780     timer_set_signal_handler();
0781     late_time_init = um_timer_setup;
0782 }
0783 
0784 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
0785 unsigned long calibrate_delay_is_known(void)
0786 {
0787     if (time_travel_mode == TT_MODE_INFCPU ||
0788         time_travel_mode == TT_MODE_EXTERNAL)
0789         return 1;
0790     return 0;
0791 }
0792 
0793 int setup_time_travel(char *str)
0794 {
0795     if (strcmp(str, "=inf-cpu") == 0) {
0796         time_travel_mode = TT_MODE_INFCPU;
0797         timer_clockevent.name = "time-travel-timer-infcpu";
0798         timer_clocksource.name = "time-travel-clock";
0799         return 1;
0800     }
0801 
0802     if (strncmp(str, "=ext:", 5) == 0) {
0803         time_travel_mode = TT_MODE_EXTERNAL;
0804         timer_clockevent.name = "time-travel-timer-external";
0805         timer_clocksource.name = "time-travel-clock-external";
0806         return time_travel_connect_external(str + 5);
0807     }
0808 
0809     if (!*str) {
0810         time_travel_mode = TT_MODE_BASIC;
0811         timer_clockevent.name = "time-travel-timer";
0812         timer_clocksource.name = "time-travel-clock";
0813         return 1;
0814     }
0815 
0816     return -EINVAL;
0817 }
0818 
0819 __setup("time-travel", setup_time_travel);
0820 __uml_help(setup_time_travel,
0821 "time-travel\n"
0822 "This option just enables basic time travel mode, in which the clock/timers\n"
0823 "inside the UML instance skip forward when there's nothing to do, rather than\n"
0824 "waiting for real time to elapse. However, instance CPU speed is limited by\n"
0825 "the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
0826 "clock (but quicker when there's nothing to do).\n"
0827 "\n"
0828 "time-travel=inf-cpu\n"
0829 "This enables time travel mode with infinite processing power, in which there\n"
0830 "are no wall clock timers, and any CPU processing happens - as seen from the\n"
0831 "guest - instantly. This can be useful for accurate simulation regardless of\n"
0832 "debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
0833 "easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
0834 "\n"
0835 "time-travel=ext:[ID:]/path/to/socket\n"
0836 "This enables time travel mode similar to =inf-cpu, except the system will\n"
0837 "use the given socket to coordinate with a central scheduler, in order to\n"
0838 "have more than one system simultaneously be on simulated time. The virtio\n"
0839 "driver code in UML knows about this so you can also simulate networks and\n"
0840 "devices using it, assuming the device has the right capabilities.\n"
0841 "The optional ID is a 64-bit integer that's sent to the central scheduler.\n");
0842 
0843 int setup_time_travel_start(char *str)
0844 {
0845     int err;
0846 
0847     err = kstrtoull(str, 0, &time_travel_start);
0848     if (err)
0849         return err;
0850 
0851     time_travel_start_set = 1;
0852     return 1;
0853 }
0854 
0855 __setup("time-travel-start", setup_time_travel_start);
0856 __uml_help(setup_time_travel_start,
0857 "time-travel-start=<seconds>\n"
0858 "Configure the UML instance's wall clock to start at this value rather than\n"
0859 "the host's wall clock at the time of UML boot.\n");
0860 #endif