// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to interrupt-poll handling in the block layer. This
 * is similar to NAPI for network devices.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/irq_poll.h>
#include <linux/delay.h>

static unsigned int irq_poll_budget __read_mostly = 256;

static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);

/**
 * irq_poll_sched - Schedule a run of the iopoll handler
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     Add this irq_poll structure to the pending poll list and raise the
 *     blk iopoll softirq.
 **/
void irq_poll_sched(struct irq_poll *iop)
{
    unsigned long flags;

    if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
        return;
    if (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
        return;

    local_irq_save(flags);
    list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
    raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
    local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_sched);
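
/*
 * Usage sketch, not part of the original file: a minimal hypothetical
 * driver showing how irq_poll_sched() is typically wired up. All foo_*
 * names and the FOO_IRQ_ENABLE register are invented for illustration,
 * and writel() would additionally need <linux/io.h>.
 */
struct foo_device {
    void __iomem *regs;          /* hypothetical MMIO registers */
    struct irq_poll iop;         /* one instance per interrupt source */
};

#define FOO_IRQ_ENABLE 0x10      /* hypothetical interrupt-mask register */

/*
 * Hard interrupt handler: mask further completion interrupts on the
 * device, then defer the actual processing to the iopoll softirq.
 */
static irqreturn_t foo_irq_handler(int irq, void *data)
{
    struct foo_device *foo = data;

    writel(0, foo->regs + FOO_IRQ_ENABLE);
    irq_poll_sched(&foo->iop);

    return IRQ_HANDLED;
}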

/**
 * __irq_poll_complete - Mark this @iop as un-polled again
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     See irq_poll_complete(). This function must be called with interrupts
 *     disabled.
 **/
static void __irq_poll_complete(struct irq_poll *iop)
{
    list_del(&iop->list);
    smp_mb__before_atomic();
    clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
}

/**
 * irq_poll_complete - Mark this @iop as un-polled again
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     If a driver consumes less than the assigned budget in its run of the
 *     iopoll handler, it'll end the polled mode by calling this function. The
 *     iopoll handler will not be invoked again before irq_poll_sched()
 *     is called.
 **/
void irq_poll_complete(struct irq_poll *iop)
{
    unsigned long flags;

    local_irq_save(flags);
    __irq_poll_complete(iop);
    local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_complete);
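
/*
 * Driver-side sketch of the contract above, continuing the hypothetical
 * foo_* driver from the earlier sketch: the poll callback processes at
 * most @budget completions. Returning a value below the budget means
 * "done", so the callback completes the instance and unmasks the device
 * interrupt. Returning the full budget (or more) leaves the instance
 * scheduled, and the softirq loop keeps ownership of it.
 */
static int foo_iopoll(struct irq_poll *iop, int budget)
{
    struct foo_device *foo = container_of(iop, struct foo_device, iop);
    int done = 0;

    /* foo_next_completion() is a hypothetical per-completion helper. */
    while (done < budget && foo_next_completion(foo))
        done++;

    if (done < budget) {
        irq_poll_complete(iop);
        writel(1, foo->regs + FOO_IRQ_ENABLE);  /* unmask the device */
    }

    return done;
}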

static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
{
    struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
    int rearm = 0, budget = irq_poll_budget;
    unsigned long start_time = jiffies;

    local_irq_disable();

    while (!list_empty(list)) {
        struct irq_poll *iop;
        int work, weight;

        /*
         * If softirq window is exhausted then punt.
         */
        if (budget <= 0 || time_after(jiffies, start_time)) {
            rearm = 1;
            break;
        }

        local_irq_enable();

        /* Even though interrupts have been re-enabled, this
         * access is safe because interrupts can only add new
         * entries to the tail of this list, and only ->poll()
         * calls can remove this head entry from the list.
         */
        iop = list_entry(list->next, struct irq_poll, list);

        weight = iop->weight;
        work = 0;
        if (test_bit(IRQ_POLL_F_SCHED, &iop->state))
            work = iop->poll(iop, weight);

        budget -= work;

        local_irq_disable();

        /*
         * Drivers must not modify the iopoll state if they consume
         * their assigned weight (or more; some drivers can't easily
         * just stop processing, they have to complete an entire mask
         * of commands). In such cases this code still "owns" the
         * iopoll instance and therefore can move the instance around
         * on the list at will.
         */
        if (work >= weight) {
            if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
                __irq_poll_complete(iop);
            else
                list_move_tail(&iop->list, list);
        }
    }

    if (rearm)
        __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);

    local_irq_enable();
}

/**
 * irq_poll_disable - Disable iopoll on this @iop
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     Disable io polling and wait for any pending callbacks to have completed.
 **/
void irq_poll_disable(struct irq_poll *iop)
{
    set_bit(IRQ_POLL_F_DISABLE, &iop->state);
    while (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
        msleep(1);
    clear_bit(IRQ_POLL_F_DISABLE, &iop->state);
}
EXPORT_SYMBOL(irq_poll_disable);

/**
 * irq_poll_enable - Enable iopoll on this @iop
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     Enable iopoll on this @iop. Note that this does not schedule a
 *     handler run; it only marks the instance as active again.
 **/
void irq_poll_enable(struct irq_poll *iop)
{
    BUG_ON(!test_bit(IRQ_POLL_F_SCHED, &iop->state));
    smp_mb__before_atomic();
    clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(irq_poll_enable);
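
/*
 * Sketch of one plausible pairing of the two calls above: quiescing
 * polling around a device reset. irq_poll_disable() returns with
 * IRQ_POLL_F_SCHED left set, which is exactly the state that
 * irq_poll_enable() expects. foo_hw_reset() is hypothetical.
 */
static void foo_reset(struct foo_device *foo)
{
    irq_poll_disable(&foo->iop);    /* waits out any running ->poll() */
    foo_hw_reset(foo);
    irq_poll_enable(&foo->iop);     /* re-arm; does not schedule a run */
}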

/**
 * irq_poll_init - Initialize this @iop
 * @iop:      The parent iopoll structure
 * @weight:   The default weight (or command completion budget)
 * @poll_fn:  The handler to invoke
 *
 * Description:
 *     Initialize and enable this irq_poll structure.
 **/
void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn)
{
    memset(iop, 0, sizeof(*iop));
    INIT_LIST_HEAD(&iop->list);
    iop->weight = weight;
    iop->poll = poll_fn;
}
EXPORT_SYMBOL(irq_poll_init);
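
/*
 * Setup sketch for the hypothetical foo driver: initialize the instance
 * once, e.g. from the probe path, before the first interrupt can fire.
 * The weight of 32 is an arbitrary example value.
 */
static void foo_setup_iopoll(struct foo_device *foo)
{
    irq_poll_init(&foo->iop, 32, foo_iopoll);
}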

static int irq_poll_cpu_dead(unsigned int cpu)
{
    /*
     * If a CPU goes away, splice its entries to the current CPU and
     * set the POLL softirq bit. The local_bh_disable()/enable() pair
     * ensures that it is handled. Otherwise the current CPU could
     * reach idle with the POLL softirq pending.
     */
    local_bh_disable();
    local_irq_disable();
    list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
             this_cpu_ptr(&blk_cpu_iopoll));
    __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
    local_irq_enable();
    local_bh_enable();

    return 0;
}

static __init int irq_poll_setup(void)
{
    int i;

    for_each_possible_cpu(i)
        INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));

    open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq);
    cpuhp_setup_state_nocalls(CPUHP_IRQ_POLL_DEAD, "irq_poll:dead", NULL,
                  irq_poll_cpu_dead);
    return 0;
}
subsys_initcall(irq_poll_setup);