0001
0002 #ifndef _LINUX_IRQ_H
0003 #define _LINUX_IRQ_H
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #include <linux/cache.h>
0014 #include <linux/spinlock.h>
0015 #include <linux/cpumask.h>
0016 #include <linux/irqhandler.h>
0017 #include <linux/irqreturn.h>
0018 #include <linux/irqnr.h>
0019 #include <linux/topology.h>
0020 #include <linux/io.h>
0021 #include <linux/slab.h>
0022
0023 #include <asm/irq.h>
0024 #include <asm/ptrace.h>
0025 #include <asm/irq_regs.h>
0026
0027 struct seq_file;
0028 struct module;
0029 struct msi_msg;
0030 struct irq_affinity_desc;
0031 enum irqchip_irq_state;
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
/*
 * IRQ line status.
 *
 * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h
 *
 * IRQ_TYPE_NONE		- default, unspecified type
 * IRQ_TYPE_EDGE_RISING		- rising edge triggered
 * IRQ_TYPE_EDGE_FALLING	- falling edge triggered
 * IRQ_TYPE_EDGE_BOTH		- rising and falling edge triggered
 * IRQ_TYPE_LEVEL_HIGH		- high level triggered
 * IRQ_TYPE_LEVEL_LOW		- low level triggered
 * IRQ_TYPE_LEVEL_MASK		- Mask to filter out the level bits
 * IRQ_TYPE_SENSE_MASK		- Mask for all the above bits
 * IRQ_TYPE_DEFAULT		- For use by some PICs to ask irq_set_type
 *				  to setup the HW to a sane default
 *
 * IRQ_TYPE_PROBE		- Special flag for probing in progress
 *
 * Bits which can be modified via irq_set/clear/modify_status_flags()
 * IRQ_LEVEL			- Interrupt is level type. Will be also
 *				  updated in the code when the above trigger
 *				  bits are modified via irq_set_irq_type()
 * IRQ_PER_CPU			- Mark an interrupt PER_CPU. Will protect
 *				  it from affinity setting
 * IRQ_NOPROBE			- Interrupt cannot be probed by autoprobing
 * IRQ_NOREQUEST		- Interrupt cannot be requested via
 *				  request_irq()
 * IRQ_NOAUTOEN			- Interrupt is not automatically enabled in
 *				  request/setup_irq()
 * IRQ_NO_BALANCING		- Interrupt cannot be balanced (affinity set)
 * IRQ_MOVE_PCNTXT		- Interrupt can be migrated from process context
 * IRQ_NESTED_THREAD		- Interrupt nests into another thread
 * IRQ_NOTHREAD			- Interrupt cannot be threaded
 * IRQ_PER_CPU_DEVID		- Dev_id is a per-cpu variable
 * IRQ_IS_POLLED		- Always polled by another interrupt. Exclude
 *				  it from the spurious interrupt detection
 *				  mechanism and from core side polling.
 * IRQ_DISABLE_UNLAZY		- Disable lazy irq disable
 * IRQ_HIDDEN			- Don't show up in /proc/interrupts
 * IRQ_NO_DEBUG			- Exclude from note_interrupt() debugging
 */
enum {
	IRQ_TYPE_NONE		= 0x00000000,
	IRQ_TYPE_EDGE_RISING	= 0x00000001,
	IRQ_TYPE_EDGE_FALLING	= 0x00000002,
	IRQ_TYPE_EDGE_BOTH	= (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
	IRQ_TYPE_LEVEL_HIGH	= 0x00000004,
	IRQ_TYPE_LEVEL_LOW	= 0x00000008,
	IRQ_TYPE_LEVEL_MASK	= (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
	IRQ_TYPE_SENSE_MASK	= 0x0000000f,
	IRQ_TYPE_DEFAULT	= IRQ_TYPE_SENSE_MASK,

	IRQ_TYPE_PROBE		= 0x00000010,

	IRQ_LEVEL		= (1 <<  8),
	IRQ_PER_CPU		= (1 <<  9),
	IRQ_NOPROBE		= (1 << 10),
	IRQ_NOREQUEST		= (1 << 11),
	IRQ_NOAUTOEN		= (1 << 12),
	IRQ_NO_BALANCING	= (1 << 13),
	IRQ_MOVE_PCNTXT		= (1 << 14),
	IRQ_NESTED_THREAD	= (1 << 15),
	IRQ_NOTHREAD		= (1 << 16),
	IRQ_PER_CPU_DEVID	= (1 << 17),
	IRQ_IS_POLLED		= (1 << 18),
	IRQ_DISABLE_UNLAZY	= (1 << 19),
	IRQ_HIDDEN		= (1 << 20),
	IRQ_NO_DEBUG		= (1 << 21),
};
0105
/*
 * Status bits which are modifiable from the outside via
 * irq_set/clear/modify_status_flags(). All other bits are owned by
 * the genirq core.
 */
#define IRQF_MODIFY_MASK	\
	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
	 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN)

/* Interrupts which are excluded from affinity balancing */
#define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
/*
 * Return values for chip->irq_set_affinity()
 *
 * IRQ_SET_MASK_OK	- OK, core updates irq_common_data.affinity
 * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_common_data.affinity
 * IRQ_SET_MASK_OK_DONE	- Same as IRQ_SET_MASK_OK for core. Special code to
 *			  support stacked irqchips, which indicates skipping
 *			  all descendant irqchips.
 */
enum {
	IRQ_SET_MASK_OK = 0,
	IRQ_SET_MASK_OK_NOCOPY,
	IRQ_SET_MASK_OK_DONE,
};
0128
0129 struct msi_desc;
0130 struct irq_domain;
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
/**
 * struct irq_common_data - per irq data shared by all irqchips
 * @state_use_accessors: status information for irq chip functions.
 *			Use accessor functions (irqd_*) to deal with it
 * @node:		node index useful for balancing
 * @handler_data:	per-IRQ data for the irq_chip methods
 * @msi_desc:		MSI descriptor
 * @affinity:		IRQ affinity on SMP. If this is an IPI related
 *			irq, then this is the mask of the CPUs to which
 *			an IPI can be sent.
 * @effective_affinity:	The effective IRQ affinity on SMP as some irq
 *			chips do not allow multi CPU destinations.
 *			A subset of @affinity.
 * @ipi_offset:		Offset of first IPI target cpu in @affinity. Optional.
 */
struct irq_common_data {
	unsigned int		__private state_use_accessors;
#ifdef CONFIG_NUMA
	unsigned int		node;
#endif
	void			*handler_data;
	struct msi_desc		*msi_desc;
#ifdef CONFIG_SMP
	cpumask_var_t		affinity;
#endif
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	cpumask_var_t		effective_affinity;
#endif
#ifdef CONFIG_GENERIC_IRQ_IPI
	unsigned int		ipi_offset;
#endif
};
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
/**
 * struct irq_data - per irq chip data passed down to chip functions
 * @mask:		precomputed bitmask for accessing the chip registers
 * @irq:		interrupt number
 * @hwirq:		hardware interrupt number, local to the interrupt domain
 * @common:		point to data shared by all irqchips
 * @chip:		low level interrupt hardware access
 * @domain:		Interrupt translation domain; responsible for mapping
 *			between hwirq number and linux irq number.
 * @parent_data:	pointer to parent struct irq_data to support hierarchy
 *			irq_domain
 * @chip_data:		platform-specific per-chip private data for the chip
 *			methods, to allow shared chip implementations
 */
struct irq_data {
	u32			mask;
	unsigned int		irq;
	unsigned long		hwirq;
	struct irq_common_data	*common;
	struct irq_chip		*chip;
	struct irq_domain	*domain;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	struct irq_data		*parent_data;
#endif
	void			*chip_data;
};
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
/*
 * Bit masks for irq_common_data.state_use_accessors
 *
 * IRQD_TRIGGER_MASK		- Mask for the trigger type bits
 * IRQD_SETAFFINITY_PENDING	- Affinity setting is pending
 * IRQD_ACTIVATED		- Interrupt has already been activated
 * IRQD_NO_BALANCING		- Balancing disabled for this IRQ
 * IRQD_PER_CPU			- Interrupt is per cpu
 * IRQD_AFFINITY_SET		- Interrupt affinity was set
 * IRQD_LEVEL			- Interrupt is level triggered
 * IRQD_WAKEUP_STATE		- Interrupt is configured for wakeup
 *				  from suspend
 * IRQD_MOVE_PCNTXT		- Interrupt can be moved in process
 *				  context
 * IRQD_IRQ_DISABLED		- Disabled state of the interrupt
 * IRQD_IRQ_MASKED		- Masked state of the interrupt
 * IRQD_IRQ_INPROGRESS		- In progress state of the interrupt
 * IRQD_WAKEUP_ARMED		- Wakeup mode armed
 * IRQD_FORWARDED_TO_VCPU	- The interrupt is forwarded to a VCPU
 * IRQD_AFFINITY_MANAGED	- Affinity is auto-managed by the kernel
 * IRQD_IRQ_STARTED		- Startup state of the interrupt
 * IRQD_MANAGED_SHUTDOWN	- Interrupt was shutdown due to empty affinity
 *				  mask. Applies only to affinity managed irqs.
 * IRQD_SINGLE_TARGET		- IRQ allows only a single affinity target
 * IRQD_DEFAULT_TRIGGER_SET	- Expected trigger already been set
 * IRQD_CAN_RESERVE		- Can use reservation mode
 * IRQD_MSI_NOMASK_QUIRK	- Non-maskable MSI quirk for affinity change
 *				  required
 * IRQD_HANDLE_ENFORCE_IRQCTX	- Enforce that handle_irq_*() is only invoked
 *				  from actual interrupt context.
 * IRQD_AFFINITY_ON_ACTIVATE	- Affinity is set on activation. Don't call
 *				  irq_chip::irq_set_affinity() when deactivated.
 * IRQD_IRQ_ENABLED_ON_SUSPEND	- Interrupt is enabled on suspend by irq pm if
 *				  irqchip has flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set.
 */
enum {
	IRQD_TRIGGER_MASK		= 0xf,
	IRQD_SETAFFINITY_PENDING	= (1 <<  8),
	IRQD_ACTIVATED			= (1 <<  9),
	IRQD_NO_BALANCING		= (1 << 10),
	IRQD_PER_CPU			= (1 << 11),
	IRQD_AFFINITY_SET		= (1 << 12),
	IRQD_LEVEL			= (1 << 13),
	IRQD_WAKEUP_STATE		= (1 << 14),
	IRQD_MOVE_PCNTXT		= (1 << 15),
	IRQD_IRQ_DISABLED		= (1 << 16),
	IRQD_IRQ_MASKED			= (1 << 17),
	IRQD_IRQ_INPROGRESS		= (1 << 18),
	IRQD_WAKEUP_ARMED		= (1 << 19),
	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
	IRQD_AFFINITY_MANAGED		= (1 << 21),
	IRQD_IRQ_STARTED		= (1 << 22),
	IRQD_MANAGED_SHUTDOWN		= (1 << 23),
	IRQD_SINGLE_TARGET		= (1 << 24),
	IRQD_DEFAULT_TRIGGER_SET	= (1 << 25),
	IRQD_CAN_RESERVE		= (1 << 26),
	IRQD_MSI_NOMASK_QUIRK		= (1 << 27),
	IRQD_HANDLE_ENFORCE_IRQCTX	= (1 << 28),
	IRQD_AFFINITY_ON_ACTIVATE	= (1 << 29),
	IRQD_IRQ_ENABLED_ON_SUSPEND	= (1 << 30),
};

/* Access the private state word; only usable via the irqd_* accessors below */
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
0255
/* True if an affinity change for this irq is queued but not yet applied */
static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
}

/* True if the interrupt is a per-CPU interrupt */
static inline bool irqd_is_per_cpu(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_PER_CPU;
}

/* Balancing is only allowed when the irq is neither per-CPU nor opted out */
static inline bool irqd_can_balance(struct irq_data *d)
{
	return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}

/* True once an affinity has been explicitly set on this irq */
static inline bool irqd_affinity_was_set(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_AFFINITY_SET;
}

/* Record that an affinity was explicitly set */
static inline void irqd_mark_affinity_was_set(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_AFFINITY_SET;
}

/* True once a default trigger type has been established for this irq */
static inline bool irqd_trigger_type_was_set(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET;
}

/* Extract the IRQ_TYPE_* trigger bits from the state word */
static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
}

/*
 * Must only be called inside irq_chip.irq_set_type() functions or
 * from the DT/ACPI setup code.
 */
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
	__irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
	__irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
	__irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET;
}

/* True if the interrupt is level triggered */
static inline bool irqd_is_level_type(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_LEVEL;
}
0306
0307
0308
0309
0310
/*
 * Must only be called of irqchip.irq_set_affinity() or low level
 * hierarchy domain allocation functions.
 */
static inline void irqd_set_single_target(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_SINGLE_TARGET;
}

/* True if the irq can only be targeted at a single CPU */
static inline bool irqd_is_single_target(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_SINGLE_TARGET;
}

/* Require that the flow handler is only invoked from hard interrupt context */
static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX;
}

static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX;
}

/* True if the irq stays enabled across suspend (see IRQD_IRQ_ENABLED_ON_SUSPEND) */
static inline bool irqd_is_enabled_on_suspend(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_ENABLED_ON_SUSPEND;
}

/* True if the irq is configured as a wakeup source */
static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
}

/* True if the irq can be migrated from process context */
static inline bool irqd_can_move_in_process_context(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
}

/* Logical disabled state of the interrupt line */
static inline bool irqd_irq_disabled(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
}

/* Masked state of the interrupt line at the chip level */
static inline bool irqd_irq_masked(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_MASKED;
}

/* True while the interrupt is being handled */
static inline bool irqd_irq_inprogress(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS;
}

/* True if wakeup mode is armed for the suspend path */
static inline bool irqd_is_wakeup_armed(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
}

/* Forwarding of the interrupt to a VCPU (set/cleared by the hypervisor glue) */
static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU;
}

static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU;
}

static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
}
0380
/* True if the affinity of this irq is auto-managed by the kernel */
static inline bool irqd_affinity_is_managed(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
}

/* Activation state: set once the irq has been activated in the domain */
static inline bool irqd_is_activated(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_ACTIVATED;
}

static inline void irqd_set_activated(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_ACTIVATED;
}

static inline void irqd_clr_activated(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_ACTIVATED;
}

/* True once the interrupt has been started up */
static inline bool irqd_is_started(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_STARTED;
}

/* True if a managed irq was shut down due to an empty affinity mask */
static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
}

/* Reservation mode (e.g. for MSI): set/clear/query IRQD_CAN_RESERVE */
static inline void irqd_set_can_reserve(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_CAN_RESERVE;
}

static inline void irqd_clr_can_reserve(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
}

static inline bool irqd_can_reserve(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_CAN_RESERVE;
}

/* Quirk handling for MSI implementations which cannot mask the interrupt */
static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
}

static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
}

static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
}

/* Defer affinity setting to activation time (see IRQD_AFFINITY_ON_ACTIVATE) */
static inline void irqd_set_affinity_on_activate(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
}

static inline bool irqd_affinity_on_activate(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
}

#undef __irqd_to_state

/* Hardware interrupt number, local to the interrupt domain of @d */
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return d->hwirq;
}
0457
0458
0459
0460
0461
0462
0463
0464
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474
0475
0476
0477
0478
0479
0480
0481
0482
0483
0484
0485
0486
0487
0488
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
0499
0500
0501
0502
0503
0504
0505
/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @name:		name for /proc/interrupts
 * @irq_startup:	start up the interrupt (defaults to ->enable if NULL)
 * @irq_shutdown:	shut down the interrupt (defaults to ->disable if NULL)
 * @irq_enable:		enable the interrupt (defaults to ->unmask if NULL)
 * @irq_disable:	disable the interrupt
 * @irq_ack:		start of a new interrupt
 * @irq_mask:		mask an interrupt source
 * @irq_mask_ack:	ack and mask an interrupt source
 * @irq_unmask:		unmask an interrupt source
 * @irq_eoi:		end of interrupt
 * @irq_set_affinity:	Set the CPU affinity on SMP machines. If the force
 *			argument is true, it tells the driver to
 *			unconditionally apply the affinity setting. Sanity
 *			checks against the supplied affinity mask are not
 *			required. This is used for CPU hotplug where the
 *			target CPU is not yet set in the cpu_online_mask.
 * @irq_retrigger:	resend an IRQ to the CPU
 * @irq_set_type:	set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
 * @irq_bus_lock:	function to lock access to slow bus (i2c) chips
 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
 * @irq_cpu_online:	configure an interrupt source for a secondary CPU
 * @irq_cpu_offline:	un-configure an interrupt source for a secondary CPU
 * @irq_suspend:	function called from core code on suspend once per
 *			chip, when one or more interrupts are installed
 * @irq_resume:		function called from core code on resume once per chip,
 *			when one or more interrupts are installed
 * @irq_pm_shutdown:	function called from core code on shutdown once per chip
 * @irq_calc_mask:	Optional function to set irq_data.mask for special cases
 * @irq_print_chip:	optional to print special chip info in show_interrupts
 * @irq_request_resources:	optional to request resources before calling
 *				any other callback related to this irq
 * @irq_release_resources:	optional to release resources acquired with
 *				irq_request_resources
 * @irq_compose_msi_msg:	optional to compose message content for MSI
 * @irq_write_msi_msg:	optional to write message content for MSI
 * @irq_get_irqchip_state:	return the internal state of an interrupt
 * @irq_set_irqchip_state:	set the internal state of an interrupt
 * @irq_set_vcpu_affinity:	optional to target a vCPU in a virtual machine
 * @ipi_send_single:	send a single IPI to destination cpus
 * @ipi_send_mask:	send an IPI to destination cpus in cpumask
 * @irq_nmi_setup:	function called from core code before enabling an NMI
 * @irq_nmi_teardown:	function called from core code after disabling an NMI
 * @flags:		chip specific flags
 */
struct irq_chip {
	const char	*name;
	unsigned int	(*irq_startup)(struct irq_data *data);
	void		(*irq_shutdown)(struct irq_data *data);
	void		(*irq_enable)(struct irq_data *data);
	void		(*irq_disable)(struct irq_data *data);

	void		(*irq_ack)(struct irq_data *data);
	void		(*irq_mask)(struct irq_data *data);
	void		(*irq_mask_ack)(struct irq_data *data);
	void		(*irq_unmask)(struct irq_data *data);
	void		(*irq_eoi)(struct irq_data *data);

	int		(*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
	int		(*irq_retrigger)(struct irq_data *data);
	int		(*irq_set_type)(struct irq_data *data, unsigned int flow_type);
	int		(*irq_set_wake)(struct irq_data *data, unsigned int on);

	void		(*irq_bus_lock)(struct irq_data *data);
	void		(*irq_bus_sync_unlock)(struct irq_data *data);

#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
	void		(*irq_cpu_online)(struct irq_data *data);
	void		(*irq_cpu_offline)(struct irq_data *data);
#endif
	void		(*irq_suspend)(struct irq_data *data);
	void		(*irq_resume)(struct irq_data *data);
	void		(*irq_pm_shutdown)(struct irq_data *data);

	void		(*irq_calc_mask)(struct irq_data *data);

	void		(*irq_print_chip)(struct irq_data *data, struct seq_file *p);
	int		(*irq_request_resources)(struct irq_data *data);
	void		(*irq_release_resources)(struct irq_data *data);

	void		(*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
	void		(*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);

	int		(*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
	int		(*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);

	int		(*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);

	void		(*ipi_send_single)(struct irq_data *data, unsigned int cpu);
	void		(*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);

	int		(*irq_nmi_setup)(struct irq_data *data);
	void		(*irq_nmi_teardown)(struct irq_data *data);

	unsigned long	flags;
};
0557
0558
0559
0560
0561
0562
0563
0564
0565
0566
0567
0568
0569
0570
0571
0572
0573
0574
0575
/*
 * irq_chip specific flags
 *
 * IRQCHIP_SET_TYPE_MASKED:           Mask before calling chip.irq_set_type()
 * IRQCHIP_EOI_IF_HANDLED:            Only issue irq_eoi() when irq was handled
 * IRQCHIP_MASK_ON_SUSPEND:           Mask non wake irqs in the suspend path
 * IRQCHIP_ONOFFLINE_ENABLED:         Only call irq_on/off_line callbacks
 *                                    when irq enabled
 * IRQCHIP_SKIP_SET_WAKE:             Skip chip.irq_set_wake(), for this irq chip
 * IRQCHIP_ONESHOT_SAFE:              One shot does not require mask/unmask
 * IRQCHIP_EOI_THREADED:              Chip requires eoi() on unmask in threaded mode
 * IRQCHIP_SUPPORTS_LEVEL_MSI:        Chip can provide two doorbells for Level MSIs
 * IRQCHIP_SUPPORTS_NMI:              Chip can deliver NMIs, only for root irqchips
 * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND:  Invokes __enable_irq()/__disable_irq() for
 *                                    wake irqs in the suspend path if they are
 *                                    in disabled state
 * IRQCHIP_AFFINITY_PRE_STARTUP:      Default affinity update before startup
 * IRQCHIP_IMMUTABLE:                 Don't ever change anything in this chip
 */
enum {
	IRQCHIP_SET_TYPE_MASKED			= (1 <<  0),
	IRQCHIP_EOI_IF_HANDLED			= (1 <<  1),
	IRQCHIP_MASK_ON_SUSPEND			= (1 <<  2),
	IRQCHIP_ONOFFLINE_ENABLED		= (1 <<  3),
	IRQCHIP_SKIP_SET_WAKE			= (1 <<  4),
	IRQCHIP_ONESHOT_SAFE			= (1 <<  5),
	IRQCHIP_EOI_THREADED			= (1 <<  6),
	IRQCHIP_SUPPORTS_LEVEL_MSI		= (1 <<  7),
	IRQCHIP_SUPPORTS_NMI			= (1 <<  8),
	IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND	= (1 <<  9),
	IRQCHIP_AFFINITY_PRE_STARTUP		= (1 << 10),
	IRQCHIP_IMMUTABLE			= (1 << 11),
};
0590
0591 #include <linux/irqdesc.h>
0592
0593
0594
0595
0596 #include <asm/hw_irq.h>
0597
0598 #ifndef NR_IRQS_LEGACY
0599 # define NR_IRQS_LEGACY 0
0600 #endif
0601
0602 #ifndef ARCH_IRQ_INIT_FLAGS
0603 # define ARCH_IRQ_INIT_FLAGS 0
0604 #endif
0605
0606 #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS
0607
0608 struct irqaction;
0609 extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
0610 extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
0611
0612 #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
0613 extern void irq_cpu_online(void);
0614 extern void irq_cpu_offline(void);
0615 #endif
0616 extern int irq_set_affinity_locked(struct irq_data *data,
0617 const struct cpumask *cpumask, bool force);
0618 extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
0619
0620 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_IRQ_MIGRATION)
0621 extern void irq_migrate_all_off_this_cpu(void);
0622 extern int irq_affinity_online_cpu(unsigned int cpu);
0623 #else
0624 # define irq_affinity_online_cpu NULL
0625 #endif
0626
0627 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
0628 void __irq_move_irq(struct irq_data *data);
0629 static inline void irq_move_irq(struct irq_data *data)
0630 {
0631 if (unlikely(irqd_is_setaffinity_pending(data)))
0632 __irq_move_irq(data);
0633 }
0634 void irq_move_masked_irq(struct irq_data *data);
0635 void irq_force_complete_move(struct irq_desc *desc);
0636 #else
0637 static inline void irq_move_irq(struct irq_data *data) { }
0638 static inline void irq_move_masked_irq(struct irq_data *data) { }
0639 static inline void irq_force_complete_move(struct irq_desc *desc) { }
0640 #endif
0641
0642 extern int no_irq_affinity;
0643
0644 #ifdef CONFIG_HARDIRQS_SW_RESEND
0645 int irq_set_parent(int irq, int parent_irq);
0646 #else
0647 static inline int irq_set_parent(int irq, int parent_irq)
0648 {
0649 return 0;
0650 }
0651 #endif
0652
0653
0654
0655
0656
0657 extern void handle_level_irq(struct irq_desc *desc);
0658 extern void handle_fasteoi_irq(struct irq_desc *desc);
0659 extern void handle_edge_irq(struct irq_desc *desc);
0660 extern void handle_edge_eoi_irq(struct irq_desc *desc);
0661 extern void handle_simple_irq(struct irq_desc *desc);
0662 extern void handle_untracked_irq(struct irq_desc *desc);
0663 extern void handle_percpu_irq(struct irq_desc *desc);
0664 extern void handle_percpu_devid_irq(struct irq_desc *desc);
0665 extern void handle_bad_irq(struct irq_desc *desc);
0666 extern void handle_nested_irq(unsigned int irq);
0667
0668 extern void handle_fasteoi_nmi(struct irq_desc *desc);
0669 extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc);
0670
0671 extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
0672 extern int irq_chip_pm_get(struct irq_data *data);
0673 extern int irq_chip_pm_put(struct irq_data *data);
0674 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
0675 extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
0676 extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
0677 extern int irq_chip_set_parent_state(struct irq_data *data,
0678 enum irqchip_irq_state which,
0679 bool val);
0680 extern int irq_chip_get_parent_state(struct irq_data *data,
0681 enum irqchip_irq_state which,
0682 bool *state);
0683 extern void irq_chip_enable_parent(struct irq_data *data);
0684 extern void irq_chip_disable_parent(struct irq_data *data);
0685 extern void irq_chip_ack_parent(struct irq_data *data);
0686 extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
0687 extern void irq_chip_mask_parent(struct irq_data *data);
0688 extern void irq_chip_mask_ack_parent(struct irq_data *data);
0689 extern void irq_chip_unmask_parent(struct irq_data *data);
0690 extern void irq_chip_eoi_parent(struct irq_data *data);
0691 extern int irq_chip_set_affinity_parent(struct irq_data *data,
0692 const struct cpumask *dest,
0693 bool force);
0694 extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
0695 extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
0696 void *vcpu_info);
0697 extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
0698 extern int irq_chip_request_resources_parent(struct irq_data *data);
0699 extern void irq_chip_release_resources_parent(struct irq_data *data);
0700 #endif
0701
0702
0703 extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
0704
0705
0706
0707 extern int noirqdebug_setup(char *str);
0708
0709
0710 extern int can_request_irq(unsigned int irq, unsigned long irqflags);
0711
0712
0713 extern struct irq_chip no_irq_chip;
0714 extern struct irq_chip dummy_irq_chip;
0715
extern void
irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name);

/* Install chip and flow handler for @irq without a custom name */
static inline void irq_set_chip_and_handler(unsigned int irq,
					    const struct irq_chip *chip,
					    irq_flow_handler_t handle)
{
	irq_set_chip_and_handler_name(irq, chip, handle, NULL);
}
0726
0727 extern int irq_set_percpu_devid(unsigned int irq);
0728 extern int irq_set_percpu_devid_partition(unsigned int irq,
0729 const struct cpumask *affinity);
0730 extern int irq_get_percpu_devid_partition(unsigned int irq,
0731 struct cpumask *affinity);
0732
0733 extern void
0734 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
0735 const char *name);
0736
/* Install a non-chained flow handler for @irq */
static inline void
irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 0, NULL);
}

/*
 * Set a highlevel chained flow handler for a given IRQ.
 * (a chained handler is automatically enabled and set to
 *  IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
 */
static inline void
irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 1, NULL);
}
0753
0754
0755
0756
0757
0758
0759 void
0760 irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
0761 void *data);
0762
0763 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
0764
/* Set IRQ_* status flags (restricted to IRQF_MODIFY_MASK by the core) */
static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
{
	irq_modify_status(irq, 0, set);
}

/* Clear IRQ_* status flags */
static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
{
	irq_modify_status(irq, clr, 0);
}

/* Exclude @irq from interrupt autoprobing */
static inline void irq_set_noprobe(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOPROBE);
}

/* Make @irq available for interrupt autoprobing */
static inline void irq_set_probe(unsigned int irq)
{
	irq_modify_status(irq, IRQ_NOPROBE, 0);
}

/* Forbid threading of the @irq handler */
static inline void irq_set_nothread(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOTHREAD);
}

/* Allow threading of the @irq handler */
static inline void irq_set_thread(unsigned int irq)
{
	irq_modify_status(irq, IRQ_NOTHREAD, 0);
}
0794
0795 static inline void irq_set_nested_thread(unsigned int irq, bool nest)
0796 {
0797 if (nest)
0798 irq_set_status_flags(irq, IRQ_NESTED_THREAD);
0799 else
0800 irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
0801 }
0802
/* Apply the canonical flag set for per-CPU device-id interrupts */
static inline void irq_set_percpu_devid_flags(unsigned int irq)
{
	irq_set_status_flags(irq,
			     IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
			     IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
}
0809
0810
0811 extern int irq_set_chip(unsigned int irq, const struct irq_chip *chip);
0812 extern int irq_set_handler_data(unsigned int irq, void *data);
0813 extern int irq_set_chip_data(unsigned int irq, void *data);
0814 extern int irq_set_irq_type(unsigned int irq, unsigned int type);
0815 extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
0816 extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
0817 struct msi_desc *entry);
0818 extern struct irq_data *irq_get_irq_data(unsigned int irq);
0819
0820 static inline struct irq_chip *irq_get_chip(unsigned int irq)
0821 {
0822 struct irq_data *d = irq_get_irq_data(irq);
0823 return d ? d->chip : NULL;
0824 }
0825
/* Chip attached to this irq_data */
static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
{
	return d->chip;
}

/* Chip private data of @irq, or NULL when no descriptor exists */
static inline void *irq_get_chip_data(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->chip_data : NULL;
}

static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
{
	return d->chip_data;
}

/* Per-IRQ handler data of @irq, or NULL when no descriptor exists */
static inline void *irq_get_handler_data(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->common->handler_data : NULL;
}

static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
{
	return d->common->handler_data;
}

/* MSI descriptor of @irq, or NULL when no descriptor exists */
static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->common->msi_desc : NULL;
}

static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
{
	return d->common->msi_desc;
}

/* IRQ_TYPE_* trigger bits of @irq; 0 (IRQ_TYPE_NONE) when no descriptor exists */
static inline u32 irq_get_trigger_type(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? irqd_get_trigger_type(d) : 0;
}
0869
/* NUMA node of the irq; node 0 when the kernel is built without NUMA */
static inline int irq_common_data_get_node(struct irq_common_data *d)
{
#ifdef CONFIG_NUMA
	return d->node;
#else
	return 0;
#endif
}

static inline int irq_data_get_node(struct irq_data *d)
{
	return irq_common_data_get_node(d->common);
}

/* Requested affinity mask; cpumask_of(0) on UP kernels */
static inline
const struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
#ifdef CONFIG_SMP
	return d->common->affinity;
#else
	return cpumask_of(0);
#endif
}

/* Copy @m into the stored affinity mask; no-op on UP kernels */
static inline void irq_data_update_affinity(struct irq_data *d,
					    const struct cpumask *m)
{
#ifdef CONFIG_SMP
	cpumask_copy(d->common->affinity, m);
#endif
}

/* Affinity mask of @irq, or NULL when no descriptor exists */
static inline const struct cpumask *irq_get_affinity_mask(int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	return d ? irq_data_get_affinity_mask(d) : NULL;
}
0908
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
/* The effective affinity (subset of the requested mask the chip really uses) */
static inline
const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
	return d->common->effective_affinity;
}
static inline void irq_data_update_effective_affinity(struct irq_data *d,
						      const struct cpumask *m)
{
	cpumask_copy(d->common->effective_affinity, m);
}
#else
/* Without the config option the effective mask is the plain affinity mask */
static inline void irq_data_update_effective_affinity(struct irq_data *d,
						      const struct cpumask *m)
{
}
static inline
const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
	return irq_data_get_affinity_mask(d);
}
#endif

/* Effective affinity mask of @irq, or NULL when no descriptor exists */
static inline
const struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	return d ? irq_data_get_effective_affinity_mask(d) : NULL;
}
0939
0940 unsigned int arch_dynirq_lower_bound(unsigned int from);
0941
0942 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
0943 struct module *owner,
0944 const struct irq_affinity_desc *affinity);
0945
0946 int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
0947 unsigned int cnt, int node, struct module *owner,
0948 const struct irq_affinity_desc *affinity);
0949
0950
/*
 * Convenience wrappers around __irq_alloc_descs()/__devm_irq_alloc_descs().
 * irq == -1 lets the core pick a free range at or above @from.
 */
#define irq_alloc_descs(irq, from, cnt, node)	\
	__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)

#define irq_alloc_desc(node)			\
	irq_alloc_descs(-1, 1, 1, node)

#define irq_alloc_desc_at(at, node)		\
	irq_alloc_descs(at, at, 1, node)

#define irq_alloc_desc_from(from, node)		\
	irq_alloc_descs(-1, from, 1, node)

#define irq_alloc_descs_from(from, cnt, node)	\
	irq_alloc_descs(-1, from, cnt, node)

/* Device-managed variants: descriptors are freed automatically on unbind */
#define devm_irq_alloc_descs(dev, irq, from, cnt, node)		\
	__devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)

#define devm_irq_alloc_desc(dev, node)				\
	devm_irq_alloc_descs(dev, -1, 1, 1, node)

#define devm_irq_alloc_desc_at(dev, at, node)			\
	devm_irq_alloc_descs(dev, at, at, 1, node)

#define devm_irq_alloc_desc_from(dev, from, node)		\
	devm_irq_alloc_descs(dev, -1, from, 1, node)

#define devm_irq_alloc_descs_from(dev, from, cnt, node)		\
	devm_irq_alloc_descs(dev, -1, from, cnt, node)

void irq_free_descs(unsigned int irq, unsigned int cnt);
/* Free a single interrupt descriptor */
static inline void irq_free_desc(unsigned int irq)
{
	irq_free_descs(irq, 1);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq);
#endif
0990
0991
0992
0993
0994
0995
0996
0997
0998
0999
1000
/**
 * struct irq_chip_regs - register offsets for struct irq_chip_generic
 * @enable:	Enable register offset to reg_base
 * @disable:	Disable register offset to reg_base
 * @mask:	Mask register offset to reg_base
 * @ack:	Ack register offset to reg_base
 * @eoi:	Eoi register offset to reg_base
 * @type:	Type configuration register offset to reg_base
 * @polarity:	Polarity configuration register offset to reg_base
 */
struct irq_chip_regs {
	unsigned long		enable;
	unsigned long		disable;
	unsigned long		mask;
	unsigned long		ack;
	unsigned long		eoi;
	unsigned long		type;
	unsigned long		polarity;
};
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
/**
 * struct irq_chip_type - Generic interrupt chip instance for a flow type
 * @chip:		The real interrupt chip which provides the callbacks
 * @regs:		Register offsets for this chip
 * @handler:		Flow handler associated with this chip
 * @type:		Chip can handle these flow types
 * @mask_cache_priv:	Cached mask register private to the chip type
 * @mask_cache:		Pointer to cached mask register
 *
 * A irq_generic_chip can have several instances of irq_chip_type when
 * it requires different functions and register offsets for different
 * flow types.
 */
struct irq_chip_type {
	struct irq_chip		chip;
	struct irq_chip_regs	regs;
	irq_flow_handler_t	handler;
	u32			type;
	u32			mask_cache_priv;
	u32			*mask_cache;
};
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
/**
 * struct irq_chip_generic - Generic irq chip data structure
 * @lock:		Lock to protect register and cache data access
 * @reg_base:		Register base address (virtual)
 * @reg_readl:		Alternate I/O accessor (defaults to readl if NULL)
 * @reg_writel:		Alternate I/O accessor (defaults to writel if NULL)
 * @suspend:		Function called from core code on suspend once per
 *			chip; can be useful instead of irq_chip::suspend to
 *			handle chip details even when no interrupts are in use
 * @resume:		Function called from core code on resume once per chip;
 *			can be useful instead of irq_chip::resume to handle chip
 *			details even when no interrupts are in use
 * @irq_base:		Interrupt base nr for this chip
 * @irq_cnt:		Number of interrupts handled by this chip
 * @mask_cache:		Cached mask register shared between all chip types
 * @type_cache:		Cached type register
 * @polarity_cache:	Cached polarity register
 * @wake_enabled:	Interrupt can wakeup from suspend
 * @wake_active:	Interrupt is marked as an wakeup from suspend source
 * @num_ct:		Number of available irq_chip_type instances (usually 1)
 * @private:		Private data for non generic chip callbacks
 * @installed:		bitfield to denote installed interrupts
 * @unused:		bitfield to denote unused interrupts
 * @domain:		irq domain pointer
 * @list:		List head for keeping track of instances
 * @chip_types:		Array of interrupt irq_chip_types
 *
 * Note, that irq_chip_generic can have multiple irq_chip_type
 * implementations which can be associated to a particular irq line of
 * an irq_chip_generic instance. That allows to share and protect
 * state in an irq_chip_generic instance when we need to implement
 * different flow mechanisms (level/edge) for it.
 */
struct irq_chip_generic {
	raw_spinlock_t		lock;
	void __iomem		*reg_base;
	u32			(*reg_readl)(void __iomem *addr);
	void			(*reg_writel)(u32 val, void __iomem *addr);
	void			(*suspend)(struct irq_chip_generic *gc);
	void			(*resume)(struct irq_chip_generic *gc);
	unsigned int		irq_base;
	unsigned int		irq_cnt;
	u32			mask_cache;
	u32			type_cache;
	u32			polarity_cache;
	u32			wake_enabled;
	u32			wake_active;
	unsigned int		num_ct;
	void			*private;
	unsigned long		installed;
	unsigned long		unused;
	struct irq_domain	*domain;
	struct list_head	list;
	struct irq_chip_type	chip_types[];
};
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
/**
 * enum irq_gc_flags - Initialization flags for generic irq chips
 * @IRQ_GC_INIT_MASK_CACHE:	Initialize the mask_cache by reading mask reg
 * @IRQ_GC_INIT_NESTED_LOCK:	Set the lock class of the irqs to nested for
 *				irq chips which need to call irq_set_wake() on
 *				the parent irq. Usually GPIO implementations
 * @IRQ_GC_MASK_CACHE_PER_TYPE:	Mask cache is chip type private
 * @IRQ_GC_NO_MASK:		Do not calculate irq_data->mask
 * @IRQ_GC_BE_IO:		Use big-endian register accesses (default: LE)
 */
enum irq_gc_flags {
	IRQ_GC_INIT_MASK_CACHE		= 1 << 0,
	IRQ_GC_INIT_NESTED_LOCK		= 1 << 1,
	IRQ_GC_MASK_CACHE_PER_TYPE	= 1 << 2,
	IRQ_GC_NO_MASK			= 1 << 3,
	IRQ_GC_BE_IO			= 1 << 4,
};
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
/*
 * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains
 * @irqs_per_chip:	Number of interrupts per chip
 * @num_chips:		Number of chips
 * @irq_flags_to_clear:	IRQ_* bits to clear in the mapping function
 * @irq_flags_to_set:	IRQ_* bits to set in the mapping function
 * @gc_flags:		Generic chip specific setup flags
 * @gc:			Array of generic irq chips which are allocated in
 *			irq_alloc_domain_generic_chips()
 */
struct irq_domain_chip_generic {
	unsigned int		irqs_per_chip;
	unsigned int		num_chips;
	unsigned int		irq_flags_to_clear;
	unsigned int		irq_flags_to_set;
	enum irq_gc_flags	gc_flags;
	struct irq_chip_generic	*gc[];
};
1124
1125
1126 void irq_gc_noop(struct irq_data *d);
1127 void irq_gc_mask_disable_reg(struct irq_data *d);
1128 void irq_gc_mask_set_bit(struct irq_data *d);
1129 void irq_gc_mask_clr_bit(struct irq_data *d);
1130 void irq_gc_unmask_enable_reg(struct irq_data *d);
1131 void irq_gc_ack_set_bit(struct irq_data *d);
1132 void irq_gc_ack_clr_bit(struct irq_data *d);
1133 void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
1134 void irq_gc_eoi(struct irq_data *d);
1135 int irq_gc_set_wake(struct irq_data *d, unsigned int on);
1136
1137
1138 int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
1139 irq_hw_number_t hw_irq);
1140 void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq);
1141 struct irq_chip_generic *
1142 irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
1143 void __iomem *reg_base, irq_flow_handler_t handler);
1144 void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
1145 enum irq_gc_flags flags, unsigned int clr,
1146 unsigned int set);
1147 int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
1148 void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
1149 unsigned int clr, unsigned int set);
1150
1151 struct irq_chip_generic *
1152 devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct,
1153 unsigned int irq_base, void __iomem *reg_base,
1154 irq_flow_handler_t handler);
1155 int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc,
1156 u32 msk, enum irq_gc_flags flags,
1157 unsigned int clr, unsigned int set);
1158
1159 struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
1160
1161 int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
1162 int num_ct, const char *name,
1163 irq_flow_handler_t handler,
1164 unsigned int clr, unsigned int set,
1165 enum irq_gc_flags flags);
1166
1167 #define irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name, \
1168 handler, clr, set, flags) \
1169 ({ \
1170 MAYBE_BUILD_BUG_ON(irqs_per_chip > 32); \
1171 __irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,\
1172 handler, clr, set, flags); \
1173 })
1174
/* Free a generic chip allocated with irq_alloc_generic_chip() */
static inline void irq_free_generic_chip(struct irq_chip_generic *gc)
{
	kfree(gc);
}

/* Remove a generic chip from its domain and free it */
static inline void irq_destroy_generic_chip(struct irq_chip_generic *gc,
					    u32 msk, unsigned int clr,
					    unsigned int set)
{
	irq_remove_generic_chip(gc, msk, clr, set);
	irq_free_generic_chip(gc);
}

/* Recover the irq_chip_type instance which embeds the chip of @d */
static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
{
	return container_of(d->chip, struct irq_chip_type, chip);
}
1192
/*
 * IRQ_MSK(n) - mask with the low @n bits set; all 32 bits for n >= 32.
 * Use an unsigned constant for the shift: for n == 31, (1 << 31) - 1
 * overflows a signed int, which is undefined behaviour in standard C,
 * while (1U << 31) - 1 is well defined and yields the same value.
 */
#define IRQ_MSK(n) (u32)((n) < 32 ? ((1U << (n)) - 1) : UINT_MAX)
1194
/* Serialize access to the generic chip's registers and cached state */
#ifdef CONFIG_SMP
static inline void irq_gc_lock(struct irq_chip_generic *gc)
{
	raw_spin_lock(&gc->lock);
}

static inline void irq_gc_unlock(struct irq_chip_generic *gc)
{
	raw_spin_unlock(&gc->lock);
}
#else
static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif

/*
 * The irqsave variants are for usage in non interrupt code. Do not use
 * them in irq_chip callbacks. Use irq_gc_lock() instead.
 */
#define irq_gc_lock_irqsave(gc, flags)	\
	raw_spin_lock_irqsave(&(gc)->lock, flags)

#define irq_gc_unlock_irqrestore(gc, flags)	\
	raw_spin_unlock_irqrestore(&(gc)->lock, flags)
1219
/* Write a chip register, honouring an alternate accessor when installed */
static inline void irq_reg_writel(struct irq_chip_generic *gc,
				  u32 val, int reg_offset)
{
	if (gc->reg_writel)
		gc->reg_writel(val, gc->reg_base + reg_offset);
	else
		writel(val, gc->reg_base + reg_offset);
}
1228
1229 static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
1230 int reg_offset)
1231 {
1232 if (gc->reg_readl)
1233 return gc->reg_readl(gc->reg_base + reg_offset);
1234 else
1235 return readl(gc->reg_base + reg_offset);
1236 }
1237
1238 struct irq_matrix;
1239 struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
1240 unsigned int alloc_start,
1241 unsigned int alloc_end);
1242 void irq_matrix_online(struct irq_matrix *m);
1243 void irq_matrix_offline(struct irq_matrix *m);
1244 void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
1245 int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
1246 void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
1247 int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
1248 unsigned int *mapped_cpu);
1249 void irq_matrix_reserve(struct irq_matrix *m);
1250 void irq_matrix_remove_reserved(struct irq_matrix *m);
1251 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
1252 bool reserved, unsigned int *mapped_cpu);
1253 void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
1254 unsigned int bit, bool managed);
1255 void irq_matrix_assign(struct irq_matrix *m, unsigned int bit);
1256 unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown);
1257 unsigned int irq_matrix_allocated(struct irq_matrix *m);
1258 unsigned int irq_matrix_reserved(struct irq_matrix *m);
1259 void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind);
1260
1261
1262 #define INVALID_HWIRQ (~0UL)
1263 irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
1264 int __ipi_send_single(struct irq_desc *desc, unsigned int cpu);
1265 int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
1266 int ipi_send_single(unsigned int virq, unsigned int cpu);
1267 int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
1268
1269 #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
1270
1271
1272
1273
1274
1275
1276
1277
1278 int __init set_handle_irq(void (*handle_irq)(struct pt_regs *));
1279
1280
1281
1282
1283
1284 extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
1285 asmlinkage void generic_handle_arch_irq(struct pt_regs *regs);
1286 #else
1287 #ifndef set_handle_irq
1288 #define set_handle_irq(handle_irq) \
1289 do { \
1290 (void)handle_irq; \
1291 WARN_ON(1); \
1292 } while (0)
1293 #endif
1294 #endif
1295
1296 #endif