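/*
 * UML interrupt handling: host file descriptors are registered with the
 * host's epoll instance and signalled via SIGIO; triggered fds are then
 * dispatched as Linux IRQs.
 */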
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>
#include <irq_kern.h>
#include <linux/time-internal.h>

extern void free_irqs(void);

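/*
 * epoll only reports that an fd triggered, not why, and a single fd can
 * carry separate read and write IRQs, so each fd keeps one irq_reg per
 * IRQ type.
 */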
struct irq_reg {
	void *id;
	int irq;
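	/* cached epoll event mask for this registration */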
	int events;
	bool active;
	bool pending;
	bool wakeup;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	bool pending_on_resume;
	void (*timetravel_handler)(int, int, void *,
				   struct time_travel_event *);
	struct time_travel_event event;
#endif
};

struct irq_entry {
	struct list_head list;
	int fd;
	struct irq_reg reg[NUM_IRQ_TYPES];
	bool suspended;
	bool sigio_workaround;
};

static DEFINE_SPINLOCK(irq_lock);
static LIST_HEAD(active_fds);
static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
static bool irqs_suspended;

static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
{
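	/*
	 * irq->active guards against reentry: if we are re-entered while a
	 * handler runs, only set irq->pending and let the outer invocation
	 * loop until no new events have been flagged.
	 */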
	if (irq->active) {
		irq->active = false;

		do {
			irq->pending = false;
			do_IRQ(irq->irq, regs);
		} while (irq->pending);

		irq->active = true;
	} else {
		irq->pending = true;
	}
}

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
static void irq_event_handler(struct time_travel_event *ev)
{
	struct irq_reg *reg = container_of(ev, struct irq_reg, event);

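	/* if we are suspended, the event only needs to act as a wakeup */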
	if (irqs_suspended)
		return;

	generic_handle_irq(reg->irq);
}

static bool irq_do_timetravel_handler(struct irq_entry *entry,
				      enum um_irq_type t)
{
	struct irq_reg *reg = &entry->reg[t];

	if (!reg->timetravel_handler)
		return false;

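	/*
	 * Hand the event to the registered time-travel handler; if it
	 * scheduled delivery, reg->event is pending and the regular IRQ
	 * path below is skipped.
	 */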
	reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);

	if (!reg->event.pending)
		return false;

	if (irqs_suspended)
		reg->pending_on_resume = true;
	return true;
}
#else
static bool irq_do_timetravel_handler(struct irq_entry *entry,
				      enum um_irq_type t)
{
	return false;
}
#endif

static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
			      struct uml_pt_regs *regs,
			      bool timetravel_handlers_only)
{
	struct irq_reg *reg = &entry->reg[t];

	if (!reg->events)
		return;

	if (os_epoll_triggered(idx, reg->events) <= 0)
		return;

	if (irq_do_timetravel_handler(entry, t))
		return;

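	/*
	 * When asked to run only the time-travel handlers (also the case
	 * while interrupts are suspended), don't dispatch the IRQ here;
	 * just note that a SIGIO is pending.
	 */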
	if (timetravel_handlers_only) {
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
		mark_sigio_pending();
#endif
		return;
	}

	irq_io_loop(reg, regs);
}

static void _sigio_handler(struct uml_pt_regs *regs,
			   bool timetravel_handlers_only)
{
	struct irq_entry *irq_entry;
	int n, i;

	if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
		return;

	while (1) {
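		/*
		 * This runs without irq_lock: the epoll entries keep a
		 * back-pointer to their irq_entry, so the data pointer
		 * fetched below stays meaningful for the events returned
		 * by this poll.
		 */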
		n = os_waiting_for_events_epoll();

		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (i = 0; i < n; i++) {
			enum um_irq_type t;

			irq_entry = os_epoll_get_data_pointer(i);

			for (t = 0; t < NUM_IRQ_TYPES; t++)
				sigio_reg_handler(i, irq_entry, t, regs,
						  timetravel_handlers_only);
		}
	}

	if (!timetravel_handlers_only)
		free_irqs();
}

void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	_sigio_handler(regs, irqs_suspended);
}

static struct irq_entry *get_irq_entry_by_fd(int fd)
{
	struct irq_entry *walk;

	lockdep_assert_held(&irq_lock);

	list_for_each_entry(walk, &active_fds, list) {
		if (walk->fd == fd)
			return walk;
	}

	return NULL;
}

static void free_irq_entry(struct irq_entry *to_free, bool remove)
{
	if (!to_free)
		return;

	if (remove)
		os_del_epoll_fd(to_free->fd);
	list_del(&to_free->list);
	kfree(to_free);
}

static bool update_irq_entry(struct irq_entry *entry)
{
	enum um_irq_type i;
	int events = 0;

	for (i = 0; i < NUM_IRQ_TYPES; i++)
		events |= entry->reg[i].events;

	if (events) {
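		/* os_add_epoll_fd() modifies an existing registration rather than failing */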
		os_add_epoll_fd(events, entry->fd, entry);
		return true;
	}

	os_del_epoll_fd(entry->fd);
	return false;
}

static void update_or_free_irq_entry(struct irq_entry *entry)
{
	if (!update_irq_entry(entry))
		free_irq_entry(entry, false);
}

static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
		       void (*timetravel_handler)(int, int, void *,
						  struct time_travel_event *))
{
	struct irq_entry *irq_entry;
	int err, events = os_event_mask(type);
	unsigned long flags;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	spin_lock_irqsave(&irq_lock, flags);
	irq_entry = get_irq_entry_by_fd(fd);
	if (irq_entry) {
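		/* registering the same fd twice for the same IRQ type is a caller bug */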
		if (WARN_ON(irq_entry->reg[type].events)) {
			err = -EALREADY;
			goto out_unlock;
		}

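		/* take the fd out of epoll while its registration is being updated */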
		os_del_epoll_fd(fd);
	} else {
		irq_entry = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
		if (!irq_entry) {
			err = -ENOMEM;
			goto out_unlock;
		}
		irq_entry->fd = fd;
		list_add_tail(&irq_entry->list, &active_fds);
		maybe_sigio_broken(fd);
	}

	irq_entry->reg[type].id = dev_id;
	irq_entry->reg[type].irq = irq;
	irq_entry->reg[type].active = true;
	irq_entry->reg[type].events = events;

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	if (um_irq_timetravel_handler_used()) {
		irq_entry->reg[type].timetravel_handler = timetravel_handler;
		irq_entry->reg[type].event.fn = irq_event_handler;
	}
#endif

	WARN_ON(!update_irq_entry(irq_entry));
	spin_unlock_irqrestore(&irq_lock, flags);

	return 0;
out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
out:
	return err;
}
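
/*
 * Remove all registrations for this fd.  To drop only a specific IRQ, use
 * free_irq_by_irq_and_dev() or deactivate_fd() instead.
 */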
void free_irq_by_fd(int fd)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	free_irq_entry(to_free, true);
	spin_unlock_irqrestore(&irq_lock, flags);
}
EXPORT_SYMBOL(free_irq_by_fd);

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type i;

		for (i = 0; i < NUM_IRQ_TYPES; i++) {
			struct irq_reg *reg = &entry->reg[i];

			if (!reg->events)
				continue;
			if (reg->irq != irq)
				continue;
			if (reg->id != dev)
				continue;

			os_del_epoll_fd(entry->fd);
			reg->events = 0;
			update_or_free_irq_entry(entry);
			goto out;
		}
	}
out:
	spin_unlock_irqrestore(&irq_lock, flags);
}

void deactivate_fd(int fd, int irqnum)
{
	struct irq_entry *entry;
	unsigned long flags;
	enum um_irq_type i;

	os_del_epoll_fd(fd);

	spin_lock_irqsave(&irq_lock, flags);
	entry = get_irq_entry_by_fd(fd);
	if (!entry)
		goto out;

	for (i = 0; i < NUM_IRQ_TYPES; i++) {
		if (!entry->reg[i].events)
			continue;
		if (entry->reg[i].irq == irqnum)
			entry->reg[i].events = 0;
	}

	update_or_free_irq_entry(entry);
out:
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);
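
/*
 * Called on shutdown to stop host I/O, e.g. to provide a clean environment
 * for a reboot/exec.  Takes no locks: nothing else should be running at this
 * point, and blocking on irq_lock could hang the shutdown.
 */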
int deactivate_all_fds(void)
{
	struct irq_entry *entry;

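	/*
	 * Stop I/O first: the IRQ loop itself takes no lock, so this is the
	 * only way to make it safe to dispose of the handlers.
	 */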
	os_set_ioignore();

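	/* freeing the entries is not safe here, so only deactivate the fds */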
	list_for_each_entry(entry, &active_fds, list)
		os_del_epoll_fd(entry->fd);
	os_close_epoll_fd();
	return 0;
}
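
/*
 * do_IRQ handles all normal device IRQs (the special SMP cross-CPU interrupts
 * have their own specific handlers).
 */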
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

void um_free_irq(int irq, void *dev)
{
	if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
		 "freeing invalid irq %d", irq))
		return;

	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
	clear_bit(irq, irqs_allocated);
}
EXPORT_SYMBOL(um_free_irq);

static int
_um_request_irq(int irq, int fd, enum um_irq_type type,
		irq_handler_t handler, unsigned long irqflags,
		const char *devname, void *dev_id,
		void (*timetravel_handler)(int, int, void *,
					   struct time_travel_event *))
{
	int err;

	if (irq == UM_IRQ_ALLOC) {
		int i;

		for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
			if (!test_and_set_bit(i, irqs_allocated)) {
				irq = i;
				break;
			}
		}
	}

	if (irq < 0)
		return -ENOSPC;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
		if (err)
			goto error;
	}

	err = request_irq(irq, handler, irqflags, devname, dev_id);
	if (err < 0)
		goto error;

	return irq;
error:
	clear_bit(irq, irqs_allocated);
	return err;
}

int um_request_irq(int irq, int fd, enum um_irq_type type,
		   irq_handler_t handler, unsigned long irqflags,
		   const char *devname, void *dev_id)
{
	return _um_request_irq(irq, fd, type, handler, irqflags,
			       devname, dev_id, NULL);
}
EXPORT_SYMBOL(um_request_irq);
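
/*
 * Illustrative sketch only (not part of this file): a driver that owns a
 * host fd would typically request a read IRQ roughly as below; the fd,
 * handler and names are hypothetical.
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	int irq = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ, my_handler,
 *				 0, "my-device", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	um_free_irq(irq, my_dev);
 */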

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
		      irq_handler_t handler, unsigned long irqflags,
		      const char *devname, void *dev_id,
		      void (*timetravel_handler)(int, int, void *,
						 struct time_travel_event *))
{
	return _um_request_irq(irq, fd, type, handler, irqflags,
			       devname, dev_id, timetravel_handler);
}
EXPORT_SYMBOL(um_request_irq_tt);

void sigio_run_timetravel_handlers(void)
{
	_sigio_handler(NULL, true);
}
#endif

#ifdef CONFIG_PM_SLEEP
void um_irqs_suspend(void)
{
	struct irq_entry *entry;
	unsigned long flags;

	irqs_suspended = true;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;
		bool clear = true;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			if (!entry->reg[t].events)
				continue;

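			/*
			 * Keep the fd armed if it is a wakeup source, if it
			 * is the SIGIO workaround write IRQ, or if it has a
			 * time-travel handler; only fds matching none of
			 * these are quiesced below.
			 */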
			if (entry->reg[t].wakeup ||
			    entry->reg[t].irq == SIGIO_WRITE_IRQ
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
			    || entry->reg[t].timetravel_handler
#endif
			    ) {
				clear = false;
				break;
			}
		}

		if (clear) {
			entry->suspended = true;
			os_clear_fd_async(entry->fd);
			entry->sigio_workaround =
				!__ignore_sigio_fd(entry->fd);
		}
	}
	spin_unlock_irqrestore(&irq_lock, flags);
}

void um_irqs_resume(void)
{
	struct irq_entry *entry;
	unsigned long flags;

	local_irq_save(flags);
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
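	/*
	 * No lock needed here: we are resuming, nothing else runs yet and
	 * interrupts are off, so deliver whatever the time-travel handlers
	 * flagged as pending while we were suspended.
	 */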
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			struct irq_reg *reg = &entry->reg[t];

			if (reg->pending_on_resume) {
				irq_enter();
				generic_handle_irq(reg->irq);
				irq_exit();
				reg->pending_on_resume = false;
			}
		}
	}
#endif

	spin_lock(&irq_lock);
	list_for_each_entry(entry, &active_fds, list) {
		if (entry->suspended) {
			int err = os_set_fd_async(entry->fd);

			WARN(err < 0, "os_set_fd_async returned %d\n", err);
			entry->suspended = false;

			if (entry->sigio_workaround) {
				err = __add_sigio_fd(entry->fd);
				WARN(err < 0, "__add_sigio_fd returned %d\n", err);
			}
		}
	}
	spin_unlock_irqrestore(&irq_lock, flags);

	irqs_suspended = false;
	send_sigio_to_self();
}

static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct irq_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			if (!entry->reg[t].events)
				continue;

			if (entry->reg[t].irq != d->irq)
				continue;
			entry->reg[t].wakeup = on;
			goto unlock;
		}
	}
unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
	return 0;
}
#else
#define normal_irq_set_wake NULL
#endif
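
/*
 * handle_edge_irq() expects ack/mask/unmask callbacks even though there is
 * no real interrupt controller here, so provide no-op implementations.
 */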
static void dummy(struct irq_data *d)
{
}

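/* used for all IRQs other than the timer */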
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
	.irq_set_wake = normal_irq_set_wake,
};

static struct irq_chip alarm_irq_type = {
	.name = "SIGALRM",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);

	for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);

	os_setup_epoll();
}
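
/*
 * IRQ stack handling: UML receives signals on a separate stack (sigaltstack)
 * instead of switching stacks after entry.  to_irq_stack() copies the task's
 * thread_info onto the IRQ stack and points the task at it; from_irq_stack()
 * copies it back.  pending_mask tracks signals that race with this switch so
 * none of their bits are lost.
 */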
static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
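		/*
		 * Another signal already deposited a bit, so it owns the
		 * stack switch.  Keep folding in any bits that arrive while
		 * we add ours, until xchg() returns exactly what we stored;
		 * then pending_mask holds one bit per pending signal and
		 * nothing has been lost.
		 */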
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}