/*
 * Functions related to interrupt-poll handling in the block layer. This
 * is similar to NAPI for network devices.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/irq_poll.h>
#include <linux/delay.h>

/*
 * Overall completion budget for one run of the iopoll softirq, shared by
 * all irq_poll instances queued on this CPU.
 */
static unsigned int irq_poll_budget __read_mostly = 256;

/* Per-CPU list of irq_poll instances with a scheduled poll run pending. */
static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);

/**
 * irq_poll_sched - Schedule a run of the iopoll handler
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     Add this irq_poll structure to the pending poll list and raise the
 *     blk iopoll softirq.
 **/
void irq_poll_sched(struct irq_poll *iop)
{
    unsigned long flags;

    if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
        return;
    if (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
        return;

    local_irq_save(flags);
    list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
    __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
    local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_sched);
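
/*
 * Example (illustrative, not part of this file): a typical caller is a
 * device's hard interrupt handler, which masks further completion
 * interrupts and defers the work to the iopoll softirq. "my_dev" and its
 * helpers are hypothetical; only the irq_poll_*() calls are real API.
 *
 *     static irqreturn_t my_dev_irq(int irq, void *data)
 *     {
 *         struct my_dev *dev = data;
 *
 *         my_dev_mask_completions(dev);   // hypothetical: quiesce the source
 *         irq_poll_sched(&dev->iop);      // queue dev->iop.poll() on this CPU
 *         return IRQ_HANDLED;
 *     }
 */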

/**
 * __irq_poll_complete - Mark this @iop as un-polled again
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     See irq_poll_complete(). This function must be called with interrupts
 *     disabled.
 **/
static void __irq_poll_complete(struct irq_poll *iop)
{
    list_del(&iop->list);
    smp_mb__before_atomic();
    clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
}

/**
 * irq_poll_complete - Mark this @iop as un-polled again
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     If a driver consumes less than the assigned budget in its run of the
 *     iopoll handler, it'll end the polled mode by calling this function. The
 *     iopoll handler will not be invoked again before irq_poll_sched()
 *     is called.
 **/
void irq_poll_complete(struct irq_poll *iop)
{
    unsigned long flags;

    local_irq_save(flags);
    __irq_poll_complete(iop);
    local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_complete);
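
/*
 * Example (illustrative): the usual shape of a ->poll() handler, loosely
 * modeled on in-tree users. It reaps at most @budget completions; if the
 * queue drains before the budget is spent, it leaves polled mode via
 * irq_poll_complete() and re-arms the device interrupt. "my_dev" and its
 * helpers are hypothetical.
 *
 *     static int my_dev_poll(struct irq_poll *iop, int budget)
 *     {
 *         struct my_dev *dev = container_of(iop, struct my_dev, iop);
 *         int done = my_dev_reap_completions(dev, budget);
 *
 *         if (done < budget) {
 *             irq_poll_complete(iop);          // clears IRQ_POLL_F_SCHED
 *             my_dev_unmask_completions(dev);  // hypothetical: re-enable IRQ
 *         }
 *         return done;
 *     }
 */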

static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
{
    struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
    int rearm = 0, budget = irq_poll_budget;
    unsigned long start_time = jiffies;

    local_irq_disable();

    while (!list_empty(list)) {
        struct irq_poll *iop;
        int work, weight;

        /*
         * The softirq window is exhausted once the overall budget is
         * spent or a full jiffy has elapsed; punt any remaining work
         * to a later softirq run.
         */
        if (budget <= 0 || time_after(jiffies, start_time)) {
            rearm = 1;
            break;
        }

        local_irq_enable();

        /* Even though interrupts have been re-enabled, this
         * access is safe because interrupts can only add new
         * entries to the tail of this list, and only ->poll()
         * calls can remove this head entry from the list.
         */
        iop = list_entry(list->next, struct irq_poll, list);

        weight = iop->weight;
        work = 0;
        if (test_bit(IRQ_POLL_F_SCHED, &iop->state))
            work = iop->poll(iop, weight);

        budget -= work;

        local_irq_disable();

        /*
         * Drivers must not modify the iopoll state if they consume
         * their assigned weight (or more; some drivers can't easily
         * just stop processing, they have to complete an entire mask
         * of commands). In such cases this code still "owns" the
         * iopoll instance and therefore can move the instance around
         * on the list at will.
         */
        if (work >= weight) {
            if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
                __irq_poll_complete(iop);
            else
                list_move_tail(&iop->list, list);
        }
    }

    if (rearm)
        __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);

    local_irq_enable();
}
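
/*
 * Worked example of the accounting above: with the default budget of 256
 * and two scheduled instances of weight 64 that each consume their full
 * weight, the loop can invoke ->poll() four times in one softirq run
 * before the budget is exhausted and the softirq is re-raised (or
 * earlier, once a full jiffy has elapsed).
 */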

/**
 * irq_poll_disable - Disable iopoll on this @iop
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     Disable io polling and wait for any pending callbacks to have completed.
 **/
void irq_poll_disable(struct irq_poll *iop)
{
    set_bit(IRQ_POLL_F_DISABLE, &iop->state);
    while (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
        msleep(1);
    clear_bit(IRQ_POLL_F_DISABLE, &iop->state);
}
EXPORT_SYMBOL(irq_poll_disable);

/**
 * irq_poll_enable - Enable iopoll on this @iop
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     Enable iopoll on this @iop. Note that this does not schedule a
 *     handler run; it only marks the instance as active again.
 **/
void irq_poll_enable(struct irq_poll *iop)
{
    BUG_ON(!test_bit(IRQ_POLL_F_SCHED, &iop->state));
    smp_mb__before_atomic();
    clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(irq_poll_enable);
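
/*
 * Example (illustrative): quiescing polling around a device reset. While
 * IRQ_POLL_F_DISABLE is set, irq_poll_sched() is a no-op, and
 * irq_poll_disable() itself waits for a pending poll run to finish.
 * "my_dev" and the reset helper are hypothetical.
 *
 *     irq_poll_disable(&dev->iop);   // waits out a pending ->poll() run
 *     my_dev_hw_reset(dev);          // hypothetical: no completions arrive
 *     irq_poll_enable(&dev->iop);    // pollable again; does not schedule
 */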

/**
 * irq_poll_init - Initialize this @iop
 * @iop:      The parent iopoll structure
 * @weight:   The default weight (or command completion budget)
 * @poll_fn:  The handler to invoke
 *
 * Description:
 *     Initialize and enable this irq_poll structure.
 **/
void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn)
{
    memset(iop, 0, sizeof(*iop));
    INIT_LIST_HEAD(&iop->list);
    iop->weight = weight;
    iop->poll = poll_fn;
}
EXPORT_SYMBOL(irq_poll_init);
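
/*
 * Example (illustrative): one-time setup, typically at device probe time.
 * The weight of 64 is an arbitrary per-run completion budget for this
 * instance; "my_dev" and "my_dev_poll" are the hypothetical structure and
 * handler from the examples above.
 *
 *     irq_poll_init(&dev->iop, 64, my_dev_poll);
 */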

static int irq_poll_cpu_dead(unsigned int cpu)
{
    /*
     * If a CPU goes away, splice its entries to the current CPU
     * and trigger a run of the softirq
     */
    local_irq_disable();
    list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
             this_cpu_ptr(&blk_cpu_iopoll));
    __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
    local_irq_enable();

    return 0;
}

static __init int irq_poll_setup(void)
{
    int i;

    for_each_possible_cpu(i)
        INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));

    open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq);
    cpuhp_setup_state_nocalls(CPUHP_IRQ_POLL_DEAD, "irq_poll:dead", NULL,
                  irq_poll_cpu_dead);
    return 0;
}
subsys_initcall(irq_poll_setup);