// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx Queue Manager driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 */

#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ixp4xx/qmgr.h>
#include <linux/soc/ixp4xx/cpu.h>

static struct qmgr_regs __iomem *qmgr_regs;
static int qmgr_irq_1;
static int qmgr_irq_2;
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static void (*irq_handlers[QUEUES])(void *pdev);
static void *irq_pdevs[QUEUES];

#if DEBUG_QMGR
char qmgr_queue_descs[QUEUES][32];
#endif

void qmgr_put_entry(unsigned int queue, u32 val)
{
#if DEBUG_QMGR
    BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */

    printk(KERN_DEBUG "Queue %s(%i) put %X\n",
           qmgr_queue_descs[queue], queue, val);
#endif
    __raw_writel(val, &qmgr_regs->acc[queue][0]);
}

u32 qmgr_get_entry(unsigned int queue)
{
    u32 val;
    val = __raw_readl(&qmgr_regs->acc[queue][0]);
#if DEBUG_QMGR
    BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */

    printk(KERN_DEBUG "Queue %s(%i) get %X\n",
           qmgr_queue_descs[queue], queue, val);
#endif
    return val;
}

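/*
 * Queue status register layout, as used by the helpers below: each stat1
 * register covers 8 queues with four flag bits per queue (empty, nearly
 * empty, nearly full, full); each stat2 register covers 16 queues with two
 * bits per queue (underflow, overflow).  Only queues 0-31 have these
 * per-queue flags; queues 32-63 only have the single-bit "nearly empty"
 * (statne_h) and "full" (statf_h) registers.
 */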
static int __qmgr_get_stat1(unsigned int queue)
{
    return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
        >> ((queue & 7) << 2)) & 0xF;
}

static int __qmgr_get_stat2(unsigned int queue)
{
    BUG_ON(queue >= HALF_QUEUES);
    return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
        >> ((queue & 0xF) << 1)) & 0x3;
}

/**
 * qmgr_stat_empty() - checks if a hardware queue is empty
 * @queue:  queue number
 *
 * Returns non-zero value if the queue is empty.
 */
int qmgr_stat_empty(unsigned int queue)
{
    BUG_ON(queue >= HALF_QUEUES);
    return __qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY;
}

/**
 * qmgr_stat_below_low_watermark() - checks if a queue is below low watermark
 * @queue:  queue number
 *
 * Returns non-zero value if the queue is below low watermark.
 */
int qmgr_stat_below_low_watermark(unsigned int queue)
{
    if (queue >= HALF_QUEUES)
        return (__raw_readl(&qmgr_regs->statne_h) >>
            (queue - HALF_QUEUES)) & 0x01;
    return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY;
}

/**
 * qmgr_stat_full() - checks if a hardware queue is full
 * @queue:  queue number
 *
 * Returns non-zero value if the queue is full.
 */
int qmgr_stat_full(unsigned int queue)
{
    if (queue >= HALF_QUEUES)
        return (__raw_readl(&qmgr_regs->statf_h) >>
            (queue - HALF_QUEUES)) & 0x01;
    return __qmgr_get_stat1(queue) & QUEUE_STAT1_FULL;
}

/**
 * qmgr_stat_overflow() - checks if a hardware queue experienced overflow
 * @queue:  queue number
 *
 * Returns non-zero value if the queue experienced overflow.
 */
int qmgr_stat_overflow(unsigned int queue)
{
    return __qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW;
}

void qmgr_set_irq(unsigned int queue, int src,
          void (*handler)(void *pdev), void *pdev)
{
    unsigned long flags;

    spin_lock_irqsave(&qmgr_lock, flags);
    if (queue < HALF_QUEUES) {
        u32 __iomem *reg;
        int bit;
        BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
        reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
        bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
        __raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
                 reg);
    } else
        /* IRQ source for queues 32-63 is fixed */
        BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);

    irq_handlers[queue] = handler;
    irq_pdevs[queue] = pdev;
    spin_unlock_irqrestore(&qmgr_lock, flags);
}
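
/*
 * Typical client usage (an illustrative sketch only, not part of this
 * driver; "MY_QUEUE", "my_handler" and the chosen length/watermarks are
 * made-up values, with MY_QUEUE assumed to be below HALF_QUEUES):
 *
 *    err = qmgr_request_queue(MY_QUEUE, 128, 0, 0, "%s:RX", "mydev");
 *    if (err)
 *        return err;
 *    qmgr_set_irq(MY_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY, my_handler, pdev);
 *    qmgr_enable_irq(MY_QUEUE);
 *
 * my_handler() then drains entries with qmgr_get_entry(); teardown is
 * qmgr_disable_irq() followed by qmgr_release_queue().
 */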


/*
 * On IXP42x rev. A0 the IRQ status registers cannot be relied upon (see the
 * ACK comments below), so these handlers acknowledge everything and
 * recompute which queues need servicing from the IRQ enable and queue
 * status registers instead.
 */
static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
{
    int i, ret = 0;
    u32 en_bitmap, src, stat;

    /* ACK - it may clear any bits so don't rely on it */
    __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);

    en_bitmap = __raw_readl(&qmgr_regs->irqen[0]);
    while (en_bitmap) {
        i = __fls(en_bitmap); /* number of the last "low" queue */
        en_bitmap &= ~BIT(i);
        src = __raw_readl(&qmgr_regs->irqsrc[i >> 3]);
        stat = __raw_readl(&qmgr_regs->stat1[i >> 3]);
        if (src & 4) /* the IRQ condition is inverted */
            stat = ~stat;
        if (stat & BIT(src & 3)) {
            irq_handlers[i](irq_pdevs[i]);
            ret = IRQ_HANDLED;
        }
    }
    return ret;
}


static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
{
    int i, ret = 0;
    u32 req_bitmap;

    /* ACK - it may clear any bits so don't rely on it */
    __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);

    req_bitmap = __raw_readl(&qmgr_regs->irqen[1]) &
             __raw_readl(&qmgr_regs->statne_h);
    while (req_bitmap) {
        i = __fls(req_bitmap); /* number of the last "high" queue */
        req_bitmap &= ~BIT(i);
        irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
        ret = IRQ_HANDLED;
    }
    return ret;
}


static irqreturn_t qmgr_irq(int irq, void *pdev)
{
    int i, half = (irq == qmgr_irq_1 ? 0 : 1);
    u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);

    if (!req_bitmap)
        return 0;
    __raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */

    while (req_bitmap) {
        i = __fls(req_bitmap); /* number of the last queue */
        req_bitmap &= ~BIT(i);
        i += half * HALF_QUEUES;
        irq_handlers[i](irq_pdevs[i]);
    }
    return IRQ_HANDLED;
}


void qmgr_enable_irq(unsigned int queue)
{
    unsigned long flags;
    int half = queue / 32;
    u32 mask = 1 << (queue & (HALF_QUEUES - 1));

    spin_lock_irqsave(&qmgr_lock, flags);
    __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
             &qmgr_regs->irqen[half]);
    spin_unlock_irqrestore(&qmgr_lock, flags);
}

void qmgr_disable_irq(unsigned int queue)
{
    unsigned long flags;
    int half = queue / 32;
    u32 mask = 1 << (queue & (HALF_QUEUES - 1));

    spin_lock_irqsave(&qmgr_lock, flags);
    __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
             &qmgr_regs->irqen[half]);
    __raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
    spin_unlock_irqrestore(&qmgr_lock, flags);
}

/* Shift the 128-bit SRAM page allocation mask left by one 16-dword page */
static inline void shift_mask(u32 *mask)
{
    mask[3] = mask[3] << 1 | mask[2] >> 31;
    mask[2] = mask[2] << 1 | mask[1] >> 31;
    mask[1] = mask[1] << 1 | mask[0] >> 31;
    mask[0] <<= 1;
}

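/*
 * Layout of the per-queue SRAM configuration word, as written by
 * qmgr_request_queue() below and read back by qmgr_release_queue():
 * bits 31..29 - nearly-full watermark
 * bits 28..26 - nearly-empty watermark
 * bits 25..24 - queue length (0 = 16, 1 = 32, 2 = 64, 3 = 128 dwords)
 * bits 21..14 - queue base address in SRAM, in 16-dword pages
 */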
#if DEBUG_QMGR
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
               unsigned int nearly_empty_watermark,
               unsigned int nearly_full_watermark,
               const char *desc_format, const char *name)
#else
int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
             unsigned int nearly_empty_watermark,
             unsigned int nearly_full_watermark)
#endif
{
    u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
    int err;

    BUG_ON(queue >= QUEUES);

    if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
        return -EINVAL;

    switch (len) {
    case  16:
        cfg = 0 << 24;
        mask[0] = 0x1;
        break;
    case  32:
        cfg = 1 << 24;
        mask[0] = 0x3;
        break;
    case  64:
        cfg = 2 << 24;
        mask[0] = 0xF;
        break;
    case 128:
        cfg = 3 << 24;
        mask[0] = 0xFF;
        break;
    default:
        return -EINVAL;
    }

    cfg |= nearly_empty_watermark << 26;
    cfg |= nearly_full_watermark << 29;
    len /= 16;      /* in 16-dwords: 1, 2, 4 or 8 */
    mask[1] = mask[2] = mask[3] = 0;

    if (!try_module_get(THIS_MODULE))
        return -ENODEV;

    spin_lock_irq(&qmgr_lock);
    if (__raw_readl(&qmgr_regs->sram[queue])) {
        err = -EBUSY;
        goto err;
    }

    while (1) {
        if (!(used_sram_bitmap[0] & mask[0]) &&
            !(used_sram_bitmap[1] & mask[1]) &&
            !(used_sram_bitmap[2] & mask[2]) &&
            !(used_sram_bitmap[3] & mask[3]))
            break; /* found free space */

        addr++;
        shift_mask(mask);
        if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
            printk(KERN_ERR "qmgr: no free SRAM space for queue %i\n",
                   queue);
            err = -ENOMEM;
            goto err;
        }
    }

    used_sram_bitmap[0] |= mask[0];
    used_sram_bitmap[1] |= mask[1];
    used_sram_bitmap[2] |= mask[2];
    used_sram_bitmap[3] |= mask[3];
    __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
#if DEBUG_QMGR
    snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
         desc_format, name);
    printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
           qmgr_queue_descs[queue], queue, addr);
#endif
    spin_unlock_irq(&qmgr_lock);
    return 0;

err:
    spin_unlock_irq(&qmgr_lock);
    module_put(THIS_MODULE);
    return err;
}

void qmgr_release_queue(unsigned int queue)
{
    u32 cfg, addr, mask[4];

    BUG_ON(queue >= QUEUES); /* not in valid range */

    spin_lock_irq(&qmgr_lock);
    cfg = __raw_readl(&qmgr_regs->sram[queue]);
    addr = (cfg >> 14) & 0xFF;

    BUG_ON(!addr);      /* not requested */

    switch ((cfg >> 24) & 3) {
    case 0: mask[0] = 0x1; break;
    case 1: mask[0] = 0x3; break;
    case 2: mask[0] = 0xF; break;
    case 3: mask[0] = 0xFF; break;
    }

    mask[1] = mask[2] = mask[3] = 0;

    while (addr--)
        shift_mask(mask);

#if DEBUG_QMGR
    printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
           qmgr_queue_descs[queue], queue);
    qmgr_queue_descs[queue][0] = '\x0';
#endif

    while ((addr = qmgr_get_entry(queue)))
        printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
               queue, addr);

    __raw_writel(0, &qmgr_regs->sram[queue]);

    used_sram_bitmap[0] &= ~mask[0];
    used_sram_bitmap[1] &= ~mask[1];
    used_sram_bitmap[2] &= ~mask[2];
    used_sram_bitmap[3] &= ~mask[3];
    irq_handlers[queue] = NULL; /* catch IRQ bugs */
    spin_unlock_irq(&qmgr_lock);

    module_put(THIS_MODULE);
}

static int ixp4xx_qmgr_probe(struct platform_device *pdev)
{
    int i, err;
    irq_handler_t handler1, handler2;
    struct device *dev = &pdev->dev;
    struct resource *res;
    int irq1, irq2;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res)
        return -ENODEV;
    qmgr_regs = devm_ioremap_resource(dev, res);
    if (IS_ERR(qmgr_regs))
        return PTR_ERR(qmgr_regs);

    irq1 = platform_get_irq(pdev, 0);
    if (irq1 <= 0)
        return irq1 ? irq1 : -EINVAL;
    qmgr_irq_1 = irq1;
    irq2 = platform_get_irq(pdev, 1);
    if (irq2 <= 0)
        return irq2 ? irq2 : -EINVAL;
    qmgr_irq_2 = irq2;

    /* reset qmgr registers */
    for (i = 0; i < 4; i++) {
        __raw_writel(0x33333333, &qmgr_regs->stat1[i]);
        __raw_writel(0, &qmgr_regs->irqsrc[i]);
    }
    for (i = 0; i < 2; i++) {
        __raw_writel(0, &qmgr_regs->stat2[i]);
        __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
        __raw_writel(0, &qmgr_regs->irqen[i]);
    }

    __raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
    __raw_writel(0, &qmgr_regs->statf_h);

    for (i = 0; i < QUEUES; i++)
        __raw_writel(0, &qmgr_regs->sram[i]);

    if (cpu_is_ixp42x_rev_a0()) {
        handler1 = qmgr_irq1_a0;
        handler2 = qmgr_irq2_a0;
    } else
        handler1 = handler2 = qmgr_irq;

    err = devm_request_irq(dev, irq1, handler1, 0, "IXP4xx Queue Manager",
                   NULL);
    if (err) {
        dev_err(dev, "failed to request IRQ%i (%i)\n",
            irq1, err);
        return err;
    }

    err = devm_request_irq(dev, irq2, handler2, 0, "IXP4xx Queue Manager",
                   NULL);
    if (err) {
        dev_err(dev, "failed to request IRQ%i (%i)\n",
            irq2, err);
        return err;
    }

    used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
    spin_lock_init(&qmgr_lock);

    dev_info(dev, "IXP4xx Queue Manager initialized.\n");
    return 0;
}

static int ixp4xx_qmgr_remove(struct platform_device *pdev)
{
    synchronize_irq(qmgr_irq_1);
    synchronize_irq(qmgr_irq_2);
    return 0;
}

static const struct of_device_id ixp4xx_qmgr_of_match[] = {
    {
        .compatible = "intel,ixp4xx-ahb-queue-manager",
    },
    {},
};

static struct platform_driver ixp4xx_qmgr_driver = {
    .driver = {
        .name           = "ixp4xx-qmgr",
        .of_match_table = ixp4xx_qmgr_of_match,
    },
    .probe = ixp4xx_qmgr_probe,
    .remove = ixp4xx_qmgr_remove,
};
module_platform_driver(ixp4xx_qmgr_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");

EXPORT_SYMBOL(qmgr_put_entry);
EXPORT_SYMBOL(qmgr_get_entry);
EXPORT_SYMBOL(qmgr_stat_empty);
EXPORT_SYMBOL(qmgr_stat_below_low_watermark);
EXPORT_SYMBOL(qmgr_stat_full);
EXPORT_SYMBOL(qmgr_stat_overflow);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
#if DEBUG_QMGR
EXPORT_SYMBOL(qmgr_queue_descs);
EXPORT_SYMBOL(qmgr_request_queue);
#else
EXPORT_SYMBOL(__qmgr_request_queue);
#endif
EXPORT_SYMBOL(qmgr_release_queue);