// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Imagination Technologies Ltd
 * Author: Qais Yousef <qais.yousef@imgtec.com>
 *
 * This file contains driver APIs to the IPI subsystem.
 */

#define pr_fmt(fmt) "genirq/ipi: " fmt

#include <linux/irqdomain.h>
#include <linux/irq.h>

/**
 * irq_reserve_ipi() - Setup an IPI to destination cpumask
 * @domain: IPI domain
 * @dest:   cpumask of CPUs which can receive the IPI
 *
 * Allocate a virq that can be used to send an IPI to any CPU in the dest mask.
 *
 * Return: Linux IRQ number on success or a negative error code on failure
 */
int irq_reserve_ipi(struct irq_domain *domain,
		    const struct cpumask *dest)
{
	unsigned int nr_irqs, offset;
	struct irq_data *data;
	int virq, i;

	if (!domain || !irq_domain_is_ipi(domain)) {
		pr_warn("Reservation on a non IPI domain\n");
		return -EINVAL;
	}

	if (!cpumask_subset(dest, cpu_possible_mask)) {
		pr_warn("Reservation is not in possible_cpu_mask\n");
		return -EINVAL;
	}

	nr_irqs = cpumask_weight(dest);
	if (!nr_irqs) {
		pr_warn("Reservation for empty destination mask\n");
		return -EINVAL;
	}

	if (irq_domain_is_ipi_single(domain)) {
		/*
		 * If the underlying implementation uses a single HW irq on
		 * all cpus then we only need a single Linux irq number for
		 * it. We have no restrictions vs. the destination mask. The
		 * underlying implementation can deal with holes nicely.
		 */
		nr_irqs = 1;
		offset = 0;
	} else {
		unsigned int next;

		/*
		 * The IPI requires a separate HW irq on each CPU. We require
		 * that the destination mask is consecutive. If an
		 * implementation needs to support holes, it can reserve
		 * several IPI ranges.
		 */
		offset = cpumask_first(dest);
		/*
		 * Find a hole and if found look for another set bit after the
		 * hole. For now we don't support this scenario.
		 */
		next = cpumask_next_zero(offset, dest);
		if (next < nr_cpu_ids)
			next = cpumask_next(next, dest);
		if (next < nr_cpu_ids) {
			pr_warn("Destination mask has holes\n");
			return -EINVAL;
		}
	}

	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE, NULL);
	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc descs\n");
		return -ENOMEM;
	}

	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
				       (void *) dest, true, NULL);

	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc hw irqs\n");
		goto free_descs;
	}

	for (i = 0; i < nr_irqs; i++) {
		data = irq_get_irq_data(virq + i);
		cpumask_copy(data->common->affinity, dest);
		data->common->ipi_offset = offset;
		irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
	}
	return virq;

free_descs:
	irq_free_descs(virq, nr_irqs);
	return -EBUSY;
}
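
/*
 * Example (editorial sketch, not part of the kernel source): a hypothetical
 * irqchip driver that owns an IPI-capable domain could reserve an IPI for
 * all possible CPUs as below. Only irq_reserve_ipi() is real here; the
 * function and variable names are invented for illustration.
 */
static int example_reserve_boot_ipi(struct irq_domain *ipi_domain)
{
	int virq;

	virq = irq_reserve_ipi(ipi_domain, cpu_possible_mask);
	if (virq < 0) {
		pr_warn("example: IPI reservation failed: %d\n", virq);
		return virq;
	}

	/* virq now identifies the IPI for later ipi_send_*() calls */
	return virq;
}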

/**
 * irq_destroy_ipi() - unreserve an IPI that was previously allocated
 * @irq:    Linux IRQ number to be destroyed
 * @dest:   cpumask of CPUs which should have the IPI removed
 *
 * The IPIs allocated with irq_reserve_ipi() are returned to the system,
 * destroying all virqs associated with them.
 *
 * Return: %0 on success or error code on failure.
 */
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
{
	struct irq_data *data = irq_get_irq_data(irq);
	const struct cpumask *ipimask;
	struct irq_domain *domain;
	unsigned int nr_irqs;

	if (!irq || !data)
		return -EINVAL;

	domain = data->domain;
	if (WARN_ON(domain == NULL))
		return -EINVAL;

	if (!irq_domain_is_ipi(domain)) {
		pr_warn("Trying to destroy a non IPI domain!\n");
		return -EINVAL;
	}

	ipimask = irq_data_get_affinity_mask(data);
	if (!ipimask || WARN_ON(!cpumask_subset(dest, ipimask)))
		/*
		 * Must be destroying a subset of the CPUs this IPI was
		 * set up to target.
		 */
		return -EINVAL;

	if (irq_domain_is_ipi_per_cpu(domain)) {
		irq = irq + cpumask_first(dest) - data->common->ipi_offset;
		nr_irqs = cpumask_weight(dest);
	} else {
		nr_irqs = 1;
	}

	irq_domain_free_irqs(irq, nr_irqs);
	return 0;
}
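
/*
 * Example (editorial sketch): tearing down the reservation made above. The
 * mask must be a subset of the one handed to irq_reserve_ipi(); here the
 * whole reservation is released. example_virq is a hypothetical variable.
 */
static void example_release_boot_ipi(unsigned int example_virq)
{
	if (irq_destroy_ipi(example_virq, cpu_possible_mask))
		pr_warn("example: IPI release failed\n");
}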

/**
 * ipi_get_hwirq - Get the hwirq associated with an IPI to a CPU
 * @irq:    Linux IRQ number
 * @cpu:    the target CPU
 *
 * When dealing with coprocessor IPIs, we need to inform the coprocessor of
 * the hwirq it needs to use to receive and send IPIs.
 *
 * Return: hwirq value on success or INVALID_HWIRQ on failure.
 */
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
{
	struct irq_data *data = irq_get_irq_data(irq);
	const struct cpumask *ipimask;

	if (!data || cpu >= nr_cpu_ids)
		return INVALID_HWIRQ;

	ipimask = irq_data_get_affinity_mask(data);
	if (!ipimask || !cpumask_test_cpu(cpu, ipimask))
		return INVALID_HWIRQ;

	/*
	 * Get the real hardware irq number if the underlying implementation
	 * uses a separate irq per cpu. If the underlying implementation uses
	 * a single hardware irq for all cpus then the IPI send mechanism
	 * needs to take care of the cpu destinations.
	 */
	if (irq_domain_is_ipi_per_cpu(data->domain))
		data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);

	return data ? irqd_to_hwirq(data) : INVALID_HWIRQ;
}
EXPORT_SYMBOL_GPL(ipi_get_hwirq);

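/*
 * Example (editorial sketch): a remoteproc-style driver could look up the
 * hwirq a coprocessor must raise to signal a given CPU and pass it on via
 * some firmware handshake. The handshake slot is purely hypothetical.
 */
static int example_export_hwirq(unsigned int virq, unsigned int cpu,
				unsigned long *fw_slot)
{
	irq_hw_number_t hwirq = ipi_get_hwirq(virq, cpu);

	if (hwirq == INVALID_HWIRQ)
		return -EINVAL;

	*fw_slot = hwirq;	/* assumed to be read by coprocessor firmware */
	return 0;
}
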
static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
			   const struct cpumask *dest, unsigned int cpu)
{
	const struct cpumask *ipimask = irq_data_get_affinity_mask(data);

	if (!chip || !ipimask)
		return -EINVAL;

	if (!chip->ipi_send_single && !chip->ipi_send_mask)
		return -EINVAL;

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (dest) {
		if (!cpumask_subset(dest, ipimask))
			return -EINVAL;
	} else {
		if (!cpumask_test_cpu(cpu, ipimask))
			return -EINVAL;
	}
	return 0;
}

/**
 * __ipi_send_single - send an IPI to a target Linux SMP CPU
 * @desc:   pointer to irq_desc of the IRQ
 * @cpu:    destination CPU, must be in the destination mask passed to
 *          irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Return: %0 on success or negative error number on failure.
 */
int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
		return -EINVAL;
#endif
	if (!chip->ipi_send_single) {
		chip->ipi_send_mask(data, cpumask_of(cpu));
		return 0;
	}

	/* FIXME: Store this information in irqdata flags */
	if (irq_domain_is_ipi_per_cpu(data->domain) &&
	    cpu != data->common->ipi_offset) {
		/* use the correct data for that cpu */
		unsigned int irq = data->irq + cpu - data->common->ipi_offset;

		data = irq_get_irq_data(irq);
	}
	chip->ipi_send_single(data, cpu);
	return 0;
}
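
/*
 * Example (editorial sketch): arch code typically resolves the irq_desc
 * once at SMP bringup and reuses it in the send hot path, which is what
 * the unchecked __ipi_send_single() is for. All names are hypothetical.
 */
static struct irq_desc *example_resched_desc;

static void example_smp_prepare_ipis(unsigned int virq)
{
	example_resched_desc = irq_to_desc(virq);
}

static void example_send_resched(unsigned int cpu)
{
	__ipi_send_single(example_resched_desc, cpu);
}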

/**
 * __ipi_send_mask - send an IPI to target Linux SMP CPU(s)
 * @desc:   pointer to irq_desc of the IRQ
 * @dest:   dest CPU(s), must be a subset of the mask passed to
 *          irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Return: %0 on success or negative error number on failure.
 */
int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	unsigned int cpu;

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
		return -EINVAL;
#endif
	if (chip->ipi_send_mask) {
		chip->ipi_send_mask(data, dest);
		return 0;
	}

	if (irq_domain_is_ipi_per_cpu(data->domain)) {
		unsigned int base = data->irq;

		for_each_cpu(cpu, dest) {
			unsigned int irq = base + cpu - data->common->ipi_offset;

			data = irq_get_irq_data(irq);
			chip->ipi_send_single(data, cpu);
		}
	} else {
		for_each_cpu(cpu, dest)
			chip->ipi_send_single(data, cpu);
	}
	return 0;
}
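
/*
 * Example (editorial sketch): broadcasting to all online CPUs but the
 * local one, a common arch pattern. Assumes the cached desc from the
 * previous sketch and a caller with preemption disabled; real code would
 * avoid an on-stack cpumask when NR_CPUS is large.
 */
static void example_send_call_function(void)
{
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	__ipi_send_mask(example_resched_desc, &mask);
}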

/**
 * ipi_send_single - Send an IPI to a single CPU
 * @virq:   Linux IRQ number from irq_reserve_ipi()
 * @cpu:    destination CPU, must be in the destination mask passed to
 *          irq_reserve_ipi()
 *
 * Return: %0 on success or negative error number on failure.
 */
int ipi_send_single(unsigned int virq, unsigned int cpu)
{
	struct irq_desc *desc = irq_to_desc(virq);
	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;

	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
		return -EINVAL;

	return __ipi_send_single(desc, cpu);
}
EXPORT_SYMBOL_GPL(ipi_send_single);
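
/*
 * Example (editorial sketch): driver-level use of the checked variant,
 * kicking a single CPU covered by an earlier reservation. Hypothetical
 * names throughout.
 */
static void example_kick_cpu(unsigned int virq, unsigned int cpu)
{
	int ret = ipi_send_single(virq, cpu);

	if (ret)
		pr_warn("example: IPI to CPU%u failed: %d\n", cpu, ret);
}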

/**
 * ipi_send_mask - Send an IPI to target CPU(s)
 * @virq:   Linux IRQ number from irq_reserve_ipi()
 * @dest:   dest CPU(s), must be a subset of the mask passed to
 *          irq_reserve_ipi()
 *
 * Return: %0 on success or negative error number on failure.
 */
int ipi_send_mask(unsigned int virq, const struct cpumask *dest)
{
	struct irq_desc *desc = irq_to_desc(virq);
	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;

	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
		return -EINVAL;

	return __ipi_send_mask(desc, dest);
}
EXPORT_SYMBOL_GPL(ipi_send_mask);
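
/*
 * Example (editorial sketch): the mask variant from driver code, signalling
 * every CPU in a previously reserved destination mask. Hypothetical names.
 */
static void example_kick_mask(unsigned int virq, const struct cpumask *dest)
{
	int ret = ipi_send_mask(virq, dest);

	if (ret)
		pr_warn("example: IPI broadcast failed: %d\n", ret);
}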