0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (C) 2017 - Cambridge Greys Ltd
0004  * Copyright (C) 2011 - 2014 Cisco Systems Inc
0005  * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
0006  * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
0007  *  Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
0008  */
0009 
0010 #include <linux/cpumask.h>
0011 #include <linux/hardirq.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/kernel_stat.h>
0014 #include <linux/module.h>
0015 #include <linux/sched.h>
0016 #include <linux/seq_file.h>
0017 #include <linux/slab.h>
0018 #include <as-layout.h>
0019 #include <kern_util.h>
0020 #include <os.h>
0021 #include <irq_user.h>
0022 #include <irq_kern.h>
0023 #include <linux/time-internal.h>
0024 
0025 
0026 extern void free_irqs(void);
0027 
0028 /* When epoll triggers we do not know why it did so;
0029  * we can also have different IRQs for read and write.
0030  * This is why we keep a small irq_reg array for each fd -
0031  * one entry per IRQ type.
0032  */
0033 struct irq_reg {
0034     void *id;
0035     int irq;
0036     /* it's cheaper to store this than to query it */
0037     int events;
0038     bool active;
0039     bool pending;
0040     bool wakeup;
0041 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
0042     bool pending_on_resume;
0043     void (*timetravel_handler)(int, int, void *,
0044                    struct time_travel_event *);
0045     struct time_travel_event event;
0046 #endif
0047 };
0048 
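     /*
      * One irq_entry per file descriptor on the active_fds list; reg[]
      * holds the per-IRQ-type registration for that fd.  The suspended
      * and sigio_workaround flags are only used by the CONFIG_PM_SLEEP
      * suspend/resume paths further down in this file.
      */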
0049 struct irq_entry {
0050     struct list_head list;
0051     int fd;
0052     struct irq_reg reg[NUM_IRQ_TYPES];
0053     bool suspended;
0054     bool sigio_workaround;
0055 };
0056 
0057 static DEFINE_SPINLOCK(irq_lock);
0058 static LIST_HEAD(active_fds);
0059 static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
0060 static bool irqs_suspended;
0061 
0062 static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
0063 {
0064 /*
0065  * irq->active guards against reentry;
0066  * irq->pending accumulates pending requests.
0067  * If pending is raised, the IRQ handler is re-run
0068  * until pending is cleared.
0069  */
0070     if (irq->active) {
0071         irq->active = false;
0072 
0073         do {
0074             irq->pending = false;
0075             do_IRQ(irq->irq, regs);
0076         } while (irq->pending);
0077 
0078         irq->active = true;
0079     } else {
0080         irq->pending = true;
0081     }
0082 }
0083 
0084 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
0085 static void irq_event_handler(struct time_travel_event *ev)
0086 {
0087     struct irq_reg *reg = container_of(ev, struct irq_reg, event);
0088 
0089     /* do nothing if suspended - just to cause a wakeup */
0090     if (irqs_suspended)
0091         return;
0092 
0093     generic_handle_irq(reg->irq);
0094 }
0095 
0096 static bool irq_do_timetravel_handler(struct irq_entry *entry,
0097                       enum um_irq_type t)
0098 {
0099     struct irq_reg *reg = &entry->reg[t];
0100 
0101     if (!reg->timetravel_handler)
0102         return false;
0103 
0104     /*
0105      * Handle all messages - we might get multiple even while
0106      * interrupts are already suspended, due to suspend order
0107      * etc. Note that time_travel_add_irq_event() will not add
0108  * an event twice; if it's pending already, "first wins".
0109      */
0110     reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);
0111 
0112     if (!reg->event.pending)
0113         return false;
0114 
0115     if (irqs_suspended)
0116         reg->pending_on_resume = true;
0117     return true;
0118 }
0119 #else
0120 static bool irq_do_timetravel_handler(struct irq_entry *entry,
0121                       enum um_irq_type t)
0122 {
0123     return false;
0124 }
0125 #endif
0126 
0127 static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
0128                   struct uml_pt_regs *regs,
0129                   bool timetravel_handlers_only)
0130 {
0131     struct irq_reg *reg = &entry->reg[t];
0132 
0133     if (!reg->events)
0134         return;
0135 
0136     if (os_epoll_triggered(idx, reg->events) <= 0)
0137         return;
0138 
0139     if (irq_do_timetravel_handler(entry, t))
0140         return;
0141 
0142     /*
0143      * If we're called to only run time-travel handlers then don't
0144      * actually proceed but mark sigio as pending (if applicable).
0145      * For suspend/resume, timetravel_handlers_only may be true
0146      * despite time-travel not being configured and used.
0147      */
0148     if (timetravel_handlers_only) {
0149 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
0150         mark_sigio_pending();
0151 #endif
0152         return;
0153     }
0154 
0155     irq_io_loop(reg, regs);
0156 }
0157 
0158 static void _sigio_handler(struct uml_pt_regs *regs,
0159                bool timetravel_handlers_only)
0160 {
0161     struct irq_entry *irq_entry;
0162     int n, i;
0163 
0164     if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
0165         return;
0166 
0167     while (1) {
0168         /* This is now lockless - epoll keeps back-references to the irqs
0169          * which have triggered it, so there is no need to walk the irq
0170          * list and lock it every time. We avoid locking by turning off
0171          * IO for a specific fd by executing os_del_epoll_fd(fd) before
0172          * we do any changes to the actual data structures.
0173          */
0174         n = os_waiting_for_events_epoll();
0175 
0176         if (n <= 0) {
0177             if (n == -EINTR)
0178                 continue;
0179             else
0180                 break;
0181         }
0182 
0183         for (i = 0; i < n ; i++) {
0184             enum um_irq_type t;
0185 
0186             irq_entry = os_epoll_get_data_pointer(i);
0187 
0188             for (t = 0; t < NUM_IRQ_TYPES; t++)
0189                 sigio_reg_handler(i, irq_entry, t, regs,
0190                           timetravel_handlers_only);
0191         }
0192     }
0193 
0194     if (!timetravel_handlers_only)
0195         free_irqs();
0196 }
0197 
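     /*
      * Signal handler entry point.  While irqs_suspended is set, only the
      * time-travel handlers are run (see sigio_reg_handler()); normal IRQ
      * dispatch is deferred until after resume.
      */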
0198 void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
0199 {
0200     _sigio_handler(regs, irqs_suspended);
0201 }
0202 
0203 static struct irq_entry *get_irq_entry_by_fd(int fd)
0204 {
0205     struct irq_entry *walk;
0206 
0207     lockdep_assert_held(&irq_lock);
0208 
0209     list_for_each_entry(walk, &active_fds, list) {
0210         if (walk->fd == fd)
0211             return walk;
0212     }
0213 
0214     return NULL;
0215 }
0216 
0217 static void free_irq_entry(struct irq_entry *to_free, bool remove)
0218 {
0219     if (!to_free)
0220         return;
0221 
0222     if (remove)
0223         os_del_epoll_fd(to_free->fd);
0224     list_del(&to_free->list);
0225     kfree(to_free);
0226 }
0227 
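     /*
      * Re-arm the fd in epoll with the union of the event masks of all
      * still-registered IRQ types.  Returns false (after removing the fd
      * from epoll) when no events remain, so the caller can free the entry.
      */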
0228 static bool update_irq_entry(struct irq_entry *entry)
0229 {
0230     enum um_irq_type i;
0231     int events = 0;
0232 
0233     for (i = 0; i < NUM_IRQ_TYPES; i++)
0234         events |= entry->reg[i].events;
0235 
0236     if (events) {
0237         /* will modify (instead of add) if needed */
0238         os_add_epoll_fd(events, entry->fd, entry);
0239         return true;
0240     }
0241 
0242     os_del_epoll_fd(entry->fd);
0243     return false;
0244 }
0245 
0246 static void update_or_free_irq_entry(struct irq_entry *entry)
0247 {
0248     if (!update_irq_entry(entry))
0249         free_irq_entry(entry, false);
0250 }
0251 
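     /*
      * Register dev_id/irq for one IRQ type on an fd, creating the
      * irq_entry if this is the first registration for that fd, and
      * (re)arm it in epoll.  The kzalloc() uses GFP_ATOMIC because it
      * runs under irq_lock with interrupts disabled.
      */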
0252 static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
0253                void (*timetravel_handler)(int, int, void *,
0254                           struct time_travel_event *))
0255 {
0256     struct irq_entry *irq_entry;
0257     int err, events = os_event_mask(type);
0258     unsigned long flags;
0259 
0260     err = os_set_fd_async(fd);
0261     if (err < 0)
0262         goto out;
0263 
0264     spin_lock_irqsave(&irq_lock, flags);
0265     irq_entry = get_irq_entry_by_fd(fd);
0266     if (irq_entry) {
0267         /* cannot register the same FD twice with the same type */
0268         if (WARN_ON(irq_entry->reg[type].events)) {
0269             err = -EALREADY;
0270             goto out_unlock;
0271         }
0272 
0273         /* temporarily disable to avoid IRQ-side locking */
0274         os_del_epoll_fd(fd);
0275     } else {
0276         irq_entry = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
0277         if (!irq_entry) {
0278             err = -ENOMEM;
0279             goto out_unlock;
0280         }
0281         irq_entry->fd = fd;
0282         list_add_tail(&irq_entry->list, &active_fds);
0283         maybe_sigio_broken(fd);
0284     }
0285 
0286     irq_entry->reg[type].id = dev_id;
0287     irq_entry->reg[type].irq = irq;
0288     irq_entry->reg[type].active = true;
0289     irq_entry->reg[type].events = events;
0290 
0291 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
0292     if (um_irq_timetravel_handler_used()) {
0293         irq_entry->reg[type].timetravel_handler = timetravel_handler;
0294         irq_entry->reg[type].event.fn = irq_event_handler;
0295     }
0296 #endif
0297 
0298     WARN_ON(!update_irq_entry(irq_entry));
0299     spin_unlock_irqrestore(&irq_lock, flags);
0300 
0301     return 0;
0302 out_unlock:
0303     spin_unlock_irqrestore(&irq_lock, flags);
0304 out:
0305     return err;
0306 }
0307 
0308 /*
0309  * Remove the entry or entries for a specific FD.  If you
0310  * don't want to remove all the possible entries, use
0311  * um_free_irq() or deactivate_fd() instead.
0312  */
0313 void free_irq_by_fd(int fd)
0314 {
0315     struct irq_entry *to_free;
0316     unsigned long flags;
0317 
0318     spin_lock_irqsave(&irq_lock, flags);
0319     to_free = get_irq_entry_by_fd(fd);
0320     free_irq_entry(to_free, true);
0321     spin_unlock_irqrestore(&irq_lock, flags);
0322 }
0323 EXPORT_SYMBOL(free_irq_by_fd);
0324 
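     /*
      * Drop a single (irq, dev_id) registration; the containing irq_entry
      * is freed by update_or_free_irq_entry() once no IRQ types remain on
      * that fd.
      */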
0325 static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
0326 {
0327     struct irq_entry *entry;
0328     unsigned long flags;
0329 
0330     spin_lock_irqsave(&irq_lock, flags);
0331     list_for_each_entry(entry, &active_fds, list) {
0332         enum um_irq_type i;
0333 
0334         for (i = 0; i < NUM_IRQ_TYPES; i++) {
0335             struct irq_reg *reg = &entry->reg[i];
0336 
0337             if (!reg->events)
0338                 continue;
0339             if (reg->irq != irq)
0340                 continue;
0341             if (reg->id != dev)
0342                 continue;
0343 
0344             os_del_epoll_fd(entry->fd);
0345             reg->events = 0;
0346             update_or_free_irq_entry(entry);
0347             goto out;
0348         }
0349     }
0350 out:
0351     spin_unlock_irqrestore(&irq_lock, flags);
0352 }
0353 
0354 void deactivate_fd(int fd, int irqnum)
0355 {
0356     struct irq_entry *entry;
0357     unsigned long flags;
0358     enum um_irq_type i;
0359 
0360     os_del_epoll_fd(fd);
0361 
0362     spin_lock_irqsave(&irq_lock, flags);
0363     entry = get_irq_entry_by_fd(fd);
0364     if (!entry)
0365         goto out;
0366 
0367     for (i = 0; i < NUM_IRQ_TYPES; i++) {
0368         if (!entry->reg[i].events)
0369             continue;
0370         if (entry->reg[i].irq == irqnum)
0371             entry->reg[i].events = 0;
0372     }
0373 
0374     update_or_free_irq_entry(entry);
0375 out:
0376     spin_unlock_irqrestore(&irq_lock, flags);
0377 
0378     ignore_sigio_fd(fd);
0379 }
0380 EXPORT_SYMBOL(deactivate_fd);
0381 
0382 /*
0383  * Called just before shutdown in order to provide a clean exec
0384  * environment in case the system is rebooting.  No locking because
0385  * that would cause a pointless shutdown hang if something hadn't
0386  * released the lock.
0387  */
0388 int deactivate_all_fds(void)
0389 {
0390     struct irq_entry *entry;
0391 
0392     /* Stop IO. The IRQ loop has no lock so this is our
0393      * only way of making sure we are safe to dispose
0394      * of all IRQ handlers
0395      */
0396     os_set_ioignore();
0397 
0398     /* we can no longer call kfree() here so just deactivate */
0399     list_for_each_entry(entry, &active_fds, list)
0400         os_del_epoll_fd(entry->fd);
0401     os_close_epoll_fd();
0402     return 0;
0403 }
0404 
0405 /*
0406  * do_IRQ handles all normal device IRQs (the special
0407  * SMP cross-CPU interrupts have their own specific
0408  * handlers).
0409  */
0410 unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
0411 {
0412     struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
0413     irq_enter();
0414     generic_handle_irq(irq);
0415     irq_exit();
0416     set_irq_regs(old_regs);
0417     return 1;
0418 }
0419 
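     /*
      * Release the fd registration (if any) and the kernel irqaction for
      * this irq/dev pair, then return a dynamically allocated IRQ number
      * to the irqs_allocated bitmap.
      */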
0420 void um_free_irq(int irq, void *dev)
0421 {
0422     if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
0423          "freeing invalid irq %d", irq))
0424         return;
0425 
0426     free_irq_by_irq_and_dev(irq, dev);
0427     free_irq(irq, dev);
0428     clear_bit(irq, irqs_allocated);
0429 }
0430 EXPORT_SYMBOL(um_free_irq);
0431 
0432 static int
0433 _um_request_irq(int irq, int fd, enum um_irq_type type,
0434         irq_handler_t handler, unsigned long irqflags,
0435         const char *devname, void *dev_id,
0436         void (*timetravel_handler)(int, int, void *,
0437                        struct time_travel_event *))
0438 {
0439     int err;
0440 
0441     if (irq == UM_IRQ_ALLOC) {
0442         int i;
0443 
0444         for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
0445             if (!test_and_set_bit(i, irqs_allocated)) {
0446                 irq = i;
0447                 break;
0448             }
0449         }
0450     }
0451 
0452     if (irq < 0)
0453         return -ENOSPC;
0454 
0455     if (fd != -1) {
0456         err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
0457         if (err)
0458             goto error;
0459     }
0460 
0461     err = request_irq(irq, handler, irqflags, devname, dev_id);
0462     if (err < 0)
0463         goto error;
0464 
0465     return irq;
0466 error:
0467     clear_bit(irq, irqs_allocated);
0468     return err;
0469 }
0470 
0471 int um_request_irq(int irq, int fd, enum um_irq_type type,
0472            irq_handler_t handler, unsigned long irqflags,
0473            const char *devname, void *dev_id)
0474 {
0475     return _um_request_irq(irq, fd, type, handler, irqflags,
0476                    devname, dev_id, NULL);
0477 }
0478 EXPORT_SYMBOL(um_request_irq);
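
     /*
      * Typical driver usage might look like the sketch below; the handler,
      * device name, and dev pointer are illustrative and not part of this
      * file:
      *
      *      irq = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ, my_handler,
      *                           0, "my-device", my_dev);
      *      if (irq < 0)
      *              return irq;
      *      ...
      *      um_free_irq(irq, my_dev);
      */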
0479 
0480 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
0481 int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
0482               irq_handler_t handler, unsigned long irqflags,
0483               const char *devname, void *dev_id,
0484               void (*timetravel_handler)(int, int, void *,
0485                          struct time_travel_event *))
0486 {
0487     return _um_request_irq(irq, fd, type, handler, irqflags,
0488                    devname, dev_id, timetravel_handler);
0489 }
0490 EXPORT_SYMBOL(um_request_irq_tt);
0491 
0492 void sigio_run_timetravel_handlers(void)
0493 {
0494     _sigio_handler(NULL, true);
0495 }
0496 #endif
0497 
0498 #ifdef CONFIG_PM_SLEEP
0499 void um_irqs_suspend(void)
0500 {
0501     struct irq_entry *entry;
0502     unsigned long flags;
0503 
0504     irqs_suspended = true;
0505 
0506     spin_lock_irqsave(&irq_lock, flags);
0507     list_for_each_entry(entry, &active_fds, list) {
0508         enum um_irq_type t;
0509         bool clear = true;
0510 
0511         for (t = 0; t < NUM_IRQ_TYPES; t++) {
0512             if (!entry->reg[t].events)
0513                 continue;
0514 
0515             /*
0516              * For the SIGIO_WRITE_IRQ, which is used to handle the
0517              * SIGIO workaround thread, we need special handling:
0518              * enable wake for it itself, but below we tell it about
0519              * any FDs that should be suspended.
0520              */
0521             if (entry->reg[t].wakeup ||
0522                 entry->reg[t].irq == SIGIO_WRITE_IRQ
0523 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
0524                 || entry->reg[t].timetravel_handler
0525 #endif
0526                 ) {
0527                 clear = false;
0528                 break;
0529             }
0530         }
0531 
0532         if (clear) {
0533             entry->suspended = true;
0534             os_clear_fd_async(entry->fd);
0535             entry->sigio_workaround =
0536                 !__ignore_sigio_fd(entry->fd);
0537         }
0538     }
0539     spin_unlock_irqrestore(&irq_lock, flags);
0540 }
0541 
0542 void um_irqs_resume(void)
0543 {
0544     struct irq_entry *entry;
0545     unsigned long flags;
0546 
0547 
0548     local_irq_save(flags);
0549 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
0550     /*
0551      * We don't need to lock anything here since we're in resume
0552      * and nothing else is running, but IRQs are disabled so the
0553      * interrupt path can't touch the interrupt list while we walk it.
0554      */
0555     list_for_each_entry(entry, &active_fds, list) {
0556         enum um_irq_type t;
0557 
0558         for (t = 0; t < NUM_IRQ_TYPES; t++) {
0559             struct irq_reg *reg = &entry->reg[t];
0560 
0561             if (reg->pending_on_resume) {
0562                 irq_enter();
0563                 generic_handle_irq(reg->irq);
0564                 irq_exit();
0565                 reg->pending_on_resume = false;
0566             }
0567         }
0568     }
0569 #endif
0570 
0571     spin_lock(&irq_lock);
0572     list_for_each_entry(entry, &active_fds, list) {
0573         if (entry->suspended) {
0574             int err = os_set_fd_async(entry->fd);
0575 
0576             WARN(err < 0, "os_set_fd_async returned %d\n", err);
0577             entry->suspended = false;
0578 
0579             if (entry->sigio_workaround) {
0580                 err = __add_sigio_fd(entry->fd);
0581                 WARN(err < 0, "__add_sigio_fd returned %d\n", err);
0582             }
0583         }
0584     }
0585     spin_unlock_irqrestore(&irq_lock, flags);
0586 
0587     irqs_suspended = false;
0588     send_sigio_to_self();
0589 }
0590 
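     /*
      * irq_set_wake callback: record the wake-up setting on the matching
      * registration; um_irqs_suspend() uses it to decide whether the fd
      * stays armed across suspend.
      */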
0591 static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
0592 {
0593     struct irq_entry *entry;
0594     unsigned long flags;
0595 
0596     spin_lock_irqsave(&irq_lock, flags);
0597     list_for_each_entry(entry, &active_fds, list) {
0598         enum um_irq_type t;
0599 
0600         for (t = 0; t < NUM_IRQ_TYPES; t++) {
0601             if (!entry->reg[t].events)
0602                 continue;
0603 
0604             if (entry->reg[t].irq != d->irq)
0605                 continue;
0606             entry->reg[t].wakeup = on;
0607             goto unlock;
0608         }
0609     }
0610 unlock:
0611     spin_unlock_irqrestore(&irq_lock, flags);
0612     return 0;
0613 }
0614 #else
0615 #define normal_irq_set_wake NULL
0616 #endif
0617 
0618 /*
0619  * irq_chip must define at least enable/disable and ack when
0620  * the edge handler is used.
0621  */
0622 static void dummy(struct irq_data *d)
0623 {
0624 }
0625 
0626 /* This is used for everything other than the timer. */
0627 static struct irq_chip normal_irq_type = {
0628     .name = "SIGIO",
0629     .irq_disable = dummy,
0630     .irq_enable = dummy,
0631     .irq_ack = dummy,
0632     .irq_mask = dummy,
0633     .irq_unmask = dummy,
0634     .irq_set_wake = normal_irq_set_wake,
0635 };
0636 
0637 static struct irq_chip alarm_irq_type = {
0638     .name = "SIGALRM",
0639     .irq_disable = dummy,
0640     .irq_enable = dummy,
0641     .irq_ack = dummy,
0642     .irq_mask = dummy,
0643     .irq_unmask = dummy,
0644 };
0645 
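     /*
      * Give the timer IRQ its own chip and wire every other signal IRQ to
      * the generic SIGIO chip with the edge handler (the loop starts at 1
      * on the assumption that TIMER_IRQ is IRQ 0), then create the epoll
      * instance.
      */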
0646 void __init init_IRQ(void)
0647 {
0648     int i;
0649 
0650     irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);
0651 
0652     for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
0653         irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
0654     /* Initialize the epoll loop */
0655     os_setup_epoll();
0656 }
0657 
0658 /*
0659  * IRQ stack entry and exit:
0660  *
0661  * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
0662  * and switch over to the IRQ stack after some preparation.  We use
0663  * sigaltstack to receive signals on a separate stack from the start.
0664  * These two functions make sure the rest of the kernel won't be too
0665  * upset by being on a different stack.  The IRQ stack has a
0666  * thread_info structure at the bottom so that current et al continue
0667  * to work.
0668  *
0669  * to_irq_stack copies the current task's thread_info to the IRQ stack
0670  * thread_info and sets the task's stack to point to the IRQ stack.
0671  *
0672  * from_irq_stack copies the thread_info struct back (flags may have
0673  * been modified) and resets the task's stack pointer.
0674  *
0675  * Tricky bits -
0676  *
0677  * What happens when two signals race each other?  UML doesn't block
0678  * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
0679  * could arrive while a previous one is still setting up the
0680  * thread_info.
0681  *
0682  * There are three cases -
0683  *     The first interrupt on the stack - sets up the thread_info and
0684  * handles the interrupt
0685  *     A nested interrupt interrupting the copying of the thread_info -
0686  * can't handle the interrupt, as the stack is in an unknown state
0687  *     A nested interrupt not interrupting the copying of the
0688  * thread_info - doesn't do any setup, just handles the interrupt
0689  *
0690  * The first job is to figure out whether we interrupted stack setup.
0691  * This is done by xchging the signal mask with thread_info->pending.
0692  * If the value that comes back is zero, then there is no setup in
0693  * progress, and the interrupt can be handled.  If the value is
0694  * non-zero, then there is stack setup in progress.  In order to have
0695  * the interrupt handled, we leave our signal in the mask, and it will
0696  * be handled by the upper handler after it has set up the stack.
0697  *
0698  * Next is to figure out whether we are the outer handler or a nested
0699  * one.  As part of setting up the stack, thread_info->real_thread is
0700  * set to non-NULL (and is reset to NULL on exit).  This is the
0701  * nesting indicator.  If it is non-NULL, then the stack is already
0702  * set up and the handler can run.
0703  */
0704 
0705 static unsigned long pending_mask;
0706 
0707 unsigned long to_irq_stack(unsigned long *mask_out)
0708 {
0709     struct thread_info *ti;
0710     unsigned long mask, old;
0711     int nested;
0712 
0713     mask = xchg(&pending_mask, *mask_out);
0714     if (mask != 0) {
0715         /*
0716          * If any interrupts come in at this point, we want to
0717          * make sure that their bits aren't lost by our
0718          * putting our bit in.  So, this loop accumulates bits
0719          * until xchg returns the same value that we put in.
0720          * When that happens, there were no new interrupts,
0721          * and pending_mask contains a bit for each interrupt
0722          * that came in.
0723          */
0724         old = *mask_out;
0725         do {
0726             old |= mask;
0727             mask = xchg(&pending_mask, old);
0728         } while (mask != old);
0729         return 1;
0730     }
0731 
0732     ti = current_thread_info();
0733     nested = (ti->real_thread != NULL);
0734     if (!nested) {
0735         struct task_struct *task;
0736         struct thread_info *tti;
0737 
0738         task = cpu_tasks[ti->cpu].task;
0739         tti = task_thread_info(task);
0740 
0741         *ti = *tti;
0742         ti->real_thread = tti;
0743         task->stack = ti;
0744     }
0745 
0746     mask = xchg(&pending_mask, 0);
0747     *mask_out |= mask | nested;
0748     return 0;
0749 }
0750 
0751 unsigned long from_irq_stack(int nested)
0752 {
0753     struct thread_info *ti, *to;
0754     unsigned long mask;
0755 
0756     ti = current_thread_info();
0757 
0758     pending_mask = 1;
0759 
0760     to = ti->real_thread;
0761     current->stack = to;
0762     ti->real_thread = NULL;
0763     *to = *ti;
0764 
0765     mask = xchg(&pending_mask, 0);
0766     return mask & ~1;
0767 }
0768