Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Keystone accumulator queue manager
0004  *
0005  * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
0006  * Author:  Sandeep Nair <sandeep_n@ti.com>
0007  *      Cyril Chemparathy <cyril@ti.com>
0008  *      Santosh Shilimkar <santosh.shilimkar@ti.com>
0009  */
0010 
0011 #include <linux/dma-mapping.h>
0012 #include <linux/io.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/module.h>
0015 #include <linux/of_address.h>
0016 #include <linux/soc/ti/knav_qmss.h>
0017 
0018 #include "knav_qmss.h"
0019 
/*
 * Map a queue index within a range to its queue instance.
 * All arguments are fully parenthesized so that expression arguments
 * (e.g. "base + i") expand correctly (CERT PRE01-C / PRE02-C).
 */
#define knav_range_offset_to_inst(kdev, range, q)	\
	((range)->queue_base_inst + ((q) << (kdev)->inst_shift))
0022 
0023 static void __knav_acc_notify(struct knav_range_info *range,
0024                 struct knav_acc_channel *acc)
0025 {
0026     struct knav_device *kdev = range->kdev;
0027     struct knav_queue_inst *inst;
0028     int range_base, queue;
0029 
0030     range_base = kdev->base_id + range->queue_base;
0031 
0032     if (range->flags & RANGE_MULTI_QUEUE) {
0033         for (queue = 0; queue < range->num_queues; queue++) {
0034             inst = knav_range_offset_to_inst(kdev, range,
0035                                 queue);
0036             if (inst->notify_needed) {
0037                 inst->notify_needed = 0;
0038                 dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
0039                     range_base + queue);
0040                 knav_queue_notify(inst);
0041             }
0042         }
0043     } else {
0044         queue = acc->channel - range->acc_info.start_channel;
0045         inst = knav_range_offset_to_inst(kdev, range, queue);
0046         dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
0047             range_base + queue);
0048         knav_queue_notify(inst);
0049     }
0050 }
0051 
0052 static int knav_acc_set_notify(struct knav_range_info *range,
0053                 struct knav_queue_inst *kq,
0054                 bool enabled)
0055 {
0056     struct knav_pdsp_info *pdsp = range->acc_info.pdsp;
0057     struct knav_device *kdev = range->kdev;
0058     u32 mask, offset;
0059 
0060     /*
0061      * when enabling, we need to re-trigger an interrupt if we
0062      * have descriptors pending
0063      */
0064     if (!enabled || atomic_read(&kq->desc_count) <= 0)
0065         return 0;
0066 
0067     kq->notify_needed = 1;
0068     atomic_inc(&kq->acc->retrigger_count);
0069     mask = BIT(kq->acc->channel % 32);
0070     offset = ACC_INTD_OFFSET_STATUS(kq->acc->channel);
0071     dev_dbg(kdev->dev, "setup-notify: re-triggering irq for %s\n",
0072         kq->acc->name);
0073     writel_relaxed(mask, pdsp->intd + offset);
0074     return 0;
0075 }
0076 
/*
 * Interrupt handler for an accumulator channel. The PDSP firmware fills
 * one of two host-memory lists (acc->list_cpu[0]/[1]) with descriptors;
 * this handler drains the currently-armed list into the per-queue software
 * rings, notifies readers, then flips list_index to the other list.
 */
static irqreturn_t knav_acc_int_handler(int irq, void *_instdata)
{
	struct knav_acc_channel *acc;
	struct knav_queue_inst *kq = NULL;
	struct knav_range_info *range;
	struct knav_pdsp_info *pdsp;
	struct knav_acc_info *info;
	struct knav_device *kdev;

	u32 *list, *list_cpu, val, idx, notifies;
	int range_base, channel, queue = 0;
	dma_addr_t list_dma;

	range = _instdata;
	info  = &range->acc_info;
	kdev  = range->kdev;
	pdsp  = range->acc_info.pdsp;
	acc   = range->acc;

	range_base = kdev->base_id + range->queue_base;
	if ((range->flags & RANGE_MULTI_QUEUE) == 0) {
		/*
		 * Single-queue range: each queue has its own irq/channel.
		 * Identify which queue's irq fired and select its channel.
		 */
		for (queue = 0; queue < range->num_irqs; queue++)
			if (range->irqs[queue].irq == irq)
				break;
		kq = knav_range_offset_to_inst(kdev, range, queue);
		acc += queue;
	}

	channel = acc->channel;
	list_dma = acc->list_dma[acc->list_index];
	list_cpu = acc->list_cpu[acc->list_index];
	dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, dma %pad\n",
		channel, acc->list_index, list_cpu, &list_dma);
	if (atomic_read(&acc->retrigger_count)) {
		/*
		 * Re-trigger requested by knav_acc_set_notify(): re-notify
		 * the queues and ack, but do NOT consume or flip the list.
		 */
		atomic_dec(&acc->retrigger_count);
		__knav_acc_notify(range, acc);
		/* reset the interrupt counter */
		writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
		/* ack the interrupt */
		writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
			       pdsp->intd + ACC_INTD_OFFSET_EOI);

		return IRQ_HANDLED;
	}

	notifies = readl_relaxed(pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
	WARN_ON(!notifies);
	/* make the firmware-written list contents visible to the CPU */
	dma_sync_single_for_cpu(kdev->dev, list_dma, info->list_size,
				DMA_FROM_DEVICE);

	/* walk list entries; a zero descriptor pointer terminates the list */
	for (list = list_cpu; list < list_cpu + (info->list_size / sizeof(u32));
	     list += ACC_LIST_ENTRY_WORDS) {
		if (ACC_LIST_ENTRY_WORDS == 1) {
			dev_dbg(kdev->dev,
				"acc-irq: list %d, entry @%p, %08x\n",
				acc->list_index, list, list[0]);
		} else if (ACC_LIST_ENTRY_WORDS == 2) {
			dev_dbg(kdev->dev,
				"acc-irq: list %d, entry @%p, %08x %08x\n",
				acc->list_index, list, list[0], list[1]);
		} else if (ACC_LIST_ENTRY_WORDS == 4) {
			dev_dbg(kdev->dev,
				"acc-irq: list %d, entry @%p, %08x %08x %08x %08x\n",
				acc->list_index, list, list[0], list[1],
				list[2], list[3]);
		}

		val = list[ACC_LIST_ENTRY_DESC_IDX];
		if (!val)
			break;

		if (range->flags & RANGE_MULTI_QUEUE) {
			/* entry carries the source queue id in its top half */
			queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16;
			if (queue < range_base ||
			    queue >= range_base + range->num_queues) {
				dev_err(kdev->dev,
					"bad queue %d, expecting %d-%d\n",
					queue, range_base,
					range_base + range->num_queues);
				break;
			}
			queue -= range_base;
			kq = knav_range_offset_to_inst(kdev, range,
								queue);
		}

		/* drop the entry rather than overflow the software ring */
		if (atomic_inc_return(&kq->desc_count) >= ACC_DESCS_MAX) {
			atomic_dec(&kq->desc_count);
			dev_err(kdev->dev,
				"acc-irq: queue %d full, entry dropped\n",
				queue + range_base);
			continue;
		}

		/* push the descriptor onto the queue's software ring */
		idx = atomic_inc_return(&kq->desc_tail) & ACC_DESCS_MASK;
		kq->descs[idx] = val;
		kq->notify_needed = 1;
		dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n",
			val, idx, queue + range_base);
	}

	__knav_acc_notify(range, acc);
	/* clear the drained list and hand it back to the firmware */
	memset(list_cpu, 0, info->list_size);
	dma_sync_single_for_device(kdev->dev, list_dma, info->list_size,
				   DMA_TO_DEVICE);

	/* flip to the other list */
	acc->list_index ^= 1;

	/* reset the interrupt counter */
	writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));

	/* ack the interrupt */
	writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
		       pdsp->intd + ACC_INTD_OFFSET_EOI);

	return IRQ_HANDLED;
}
0194 
0195 static int knav_range_setup_acc_irq(struct knav_range_info *range,
0196                 int queue, bool enabled)
0197 {
0198     struct knav_device *kdev = range->kdev;
0199     struct knav_acc_channel *acc;
0200     struct cpumask *cpu_mask;
0201     int ret = 0, irq;
0202     u32 old, new;
0203 
0204     if (range->flags & RANGE_MULTI_QUEUE) {
0205         acc = range->acc;
0206         irq = range->irqs[0].irq;
0207         cpu_mask = range->irqs[0].cpu_mask;
0208     } else {
0209         acc = range->acc + queue;
0210         irq = range->irqs[queue].irq;
0211         cpu_mask = range->irqs[queue].cpu_mask;
0212     }
0213 
0214     old = acc->open_mask;
0215     if (enabled)
0216         new = old | BIT(queue);
0217     else
0218         new = old & ~BIT(queue);
0219     acc->open_mask = new;
0220 
0221     dev_dbg(kdev->dev,
0222         "setup-acc-irq: open mask old %08x, new %08x, channel %s\n",
0223         old, new, acc->name);
0224 
0225     if (likely(new == old))
0226         return 0;
0227 
0228     if (new && !old) {
0229         dev_dbg(kdev->dev,
0230             "setup-acc-irq: requesting %s for channel %s\n",
0231             acc->name, acc->name);
0232         ret = request_irq(irq, knav_acc_int_handler, 0, acc->name,
0233                   range);
0234         if (!ret && cpu_mask) {
0235             ret = irq_set_affinity_hint(irq, cpu_mask);
0236             if (ret) {
0237                 dev_warn(range->kdev->dev,
0238                      "Failed to set IRQ affinity\n");
0239                 return ret;
0240             }
0241         }
0242     }
0243 
0244     if (old && !new) {
0245         dev_dbg(kdev->dev, "setup-acc-irq: freeing %s for channel %s\n",
0246             acc->name, acc->name);
0247         ret = irq_set_affinity_hint(irq, NULL);
0248         if (ret)
0249             dev_warn(range->kdev->dev,
0250                  "Failed to set IRQ affinity\n");
0251         free_irq(irq, range);
0252     }
0253 
0254     return ret;
0255 }
0256 
0257 static const char *knav_acc_result_str(enum knav_acc_result result)
0258 {
0259     static const char * const result_str[] = {
0260         [ACC_RET_IDLE]          = "idle",
0261         [ACC_RET_SUCCESS]       = "success",
0262         [ACC_RET_INVALID_COMMAND]   = "invalid command",
0263         [ACC_RET_INVALID_CHANNEL]   = "invalid channel",
0264         [ACC_RET_INACTIVE_CHANNEL]  = "inactive channel",
0265         [ACC_RET_ACTIVE_CHANNEL]    = "active channel",
0266         [ACC_RET_INVALID_QUEUE]     = "invalid queue",
0267         [ACC_RET_INVALID_RET]       = "invalid return code",
0268     };
0269 
0270     if (result >= ARRAY_SIZE(result_str))
0271         return result_str[ACC_RET_INVALID_RET];
0272     else
0273         return result_str[result];
0274 }
0275 
/*
 * Issue a command to the accumulator PDSP firmware and busy-wait for it
 * to complete. Returns the firmware's result code (top byte of the
 * command word) as an enum knav_acc_result.
 *
 * NOTE(review): the poll loop has no timeout; a wedged firmware would
 * hang here — presumably acceptable at this point of setup, but confirm.
 */
static enum knav_acc_result
knav_acc_write(struct knav_device *kdev, struct knav_pdsp_info *pdsp,
		struct knav_reg_acc_command *cmd)
{
	u32 result;

	dev_dbg(kdev->dev, "acc command %08x %08x %08x %08x %08x\n",
		cmd->command, cmd->queue_mask, cmd->list_dma,
		cmd->queue_num, cmd->timer_config);

	/* the command word is written last: writing it kicks off processing */
	writel_relaxed(cmd->timer_config, &pdsp->acc_command->timer_config);
	writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num);
	writel_relaxed(cmd->list_dma, &pdsp->acc_command->list_dma);
	writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask);
	writel_relaxed(cmd->command, &pdsp->acc_command->command);

	/* wait for the command to clear (byte 1 drops to zero when done) */
	do {
		result = readl_relaxed(&pdsp->acc_command->command);
	} while ((result >> 8) & 0xff);

	/* result code lives in the top byte of the command word */
	return (result >> 24) & 0xff;
}
0299 
0300 static void knav_acc_setup_cmd(struct knav_device *kdev,
0301                 struct knav_range_info *range,
0302                 struct knav_reg_acc_command *cmd,
0303                 int queue)
0304 {
0305     struct knav_acc_info *info = &range->acc_info;
0306     struct knav_acc_channel *acc;
0307     int queue_base;
0308     u32 queue_mask;
0309 
0310     if (range->flags & RANGE_MULTI_QUEUE) {
0311         acc = range->acc;
0312         queue_base = range->queue_base;
0313         queue_mask = BIT(range->num_queues) - 1;
0314     } else {
0315         acc = range->acc + queue;
0316         queue_base = range->queue_base + queue;
0317         queue_mask = 0;
0318     }
0319 
0320     memset(cmd, 0, sizeof(*cmd));
0321     cmd->command    = acc->channel;
0322     cmd->queue_mask = queue_mask;
0323     cmd->list_dma   = (u32)acc->list_dma[0];
0324     cmd->queue_num  = info->list_entries << 16;
0325     cmd->queue_num |= queue_base;
0326 
0327     cmd->timer_config = ACC_LIST_ENTRY_TYPE << 18;
0328     if (range->flags & RANGE_MULTI_QUEUE)
0329         cmd->timer_config |= ACC_CFG_MULTI_QUEUE;
0330     cmd->timer_config |= info->pacing_mode << 16;
0331     cmd->timer_config |= info->timer_count;
0332 }
0333 
0334 static void knav_acc_stop(struct knav_device *kdev,
0335                 struct knav_range_info *range,
0336                 int queue)
0337 {
0338     struct knav_reg_acc_command cmd;
0339     struct knav_acc_channel *acc;
0340     enum knav_acc_result result;
0341 
0342     acc = range->acc + queue;
0343 
0344     knav_acc_setup_cmd(kdev, range, &cmd, queue);
0345     cmd.command |= ACC_CMD_DISABLE_CHANNEL << 8;
0346     result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);
0347 
0348     dev_dbg(kdev->dev, "stopped acc channel %s, result %s\n",
0349         acc->name, knav_acc_result_str(result));
0350 }
0351 
0352 static enum knav_acc_result knav_acc_start(struct knav_device *kdev,
0353                         struct knav_range_info *range,
0354                         int queue)
0355 {
0356     struct knav_reg_acc_command cmd;
0357     struct knav_acc_channel *acc;
0358     enum knav_acc_result result;
0359 
0360     acc = range->acc + queue;
0361 
0362     knav_acc_setup_cmd(kdev, range, &cmd, queue);
0363     cmd.command |= ACC_CMD_ENABLE_CHANNEL << 8;
0364     result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);
0365 
0366     dev_dbg(kdev->dev, "started acc channel %s, result %s\n",
0367         acc->name, knav_acc_result_str(result));
0368 
0369     return result;
0370 }
0371 
0372 static int knav_acc_init_range(struct knav_range_info *range)
0373 {
0374     struct knav_device *kdev = range->kdev;
0375     struct knav_acc_channel *acc;
0376     enum knav_acc_result result;
0377     int queue;
0378 
0379     for (queue = 0; queue < range->num_queues; queue++) {
0380         acc = range->acc + queue;
0381 
0382         knav_acc_stop(kdev, range, queue);
0383         acc->list_index = 0;
0384         result = knav_acc_start(kdev, range, queue);
0385 
0386         if (result != ACC_RET_SUCCESS)
0387             return -EIO;
0388 
0389         if (range->flags & RANGE_MULTI_QUEUE)
0390             return 0;
0391     }
0392     return 0;
0393 }
0394 
0395 static int knav_acc_init_queue(struct knav_range_info *range,
0396                 struct knav_queue_inst *kq)
0397 {
0398     unsigned id = kq->id - range->queue_base;
0399 
0400     kq->descs = devm_kcalloc(range->kdev->dev,
0401                  ACC_DESCS_MAX, sizeof(u32), GFP_KERNEL);
0402     if (!kq->descs)
0403         return -ENOMEM;
0404 
0405     kq->acc = range->acc;
0406     if ((range->flags & RANGE_MULTI_QUEUE) == 0)
0407         kq->acc += id;
0408     return 0;
0409 }
0410 
0411 static int knav_acc_open_queue(struct knav_range_info *range,
0412                 struct knav_queue_inst *inst, unsigned flags)
0413 {
0414     unsigned id = inst->id - range->queue_base;
0415 
0416     return knav_range_setup_acc_irq(range, id, true);
0417 }
0418 
0419 static int knav_acc_close_queue(struct knav_range_info *range,
0420                     struct knav_queue_inst *inst)
0421 {
0422     unsigned id = inst->id - range->queue_base;
0423 
0424     return knav_range_setup_acc_irq(range, id, false);
0425 }
0426 
0427 static int knav_acc_free_range(struct knav_range_info *range)
0428 {
0429     struct knav_device *kdev = range->kdev;
0430     struct knav_acc_channel *acc;
0431     struct knav_acc_info *info;
0432     int channel, channels;
0433 
0434     info = &range->acc_info;
0435 
0436     if (range->flags & RANGE_MULTI_QUEUE)
0437         channels = 1;
0438     else
0439         channels = range->num_queues;
0440 
0441     for (channel = 0; channel < channels; channel++) {
0442         acc = range->acc + channel;
0443         if (!acc->list_cpu[0])
0444             continue;
0445         dma_unmap_single(kdev->dev, acc->list_dma[0],
0446                  info->mem_size, DMA_BIDIRECTIONAL);
0447         free_pages_exact(acc->list_cpu[0], info->mem_size);
0448     }
0449     devm_kfree(range->kdev->dev, range->acc);
0450     return 0;
0451 }
0452 
/* Accumulator range operations; installed by knav_init_acc_range(). */
static struct knav_range_ops knav_acc_range_ops = {
	.set_notify	= knav_acc_set_notify,
	.init_queue	= knav_acc_init_queue,
	.open_queue	= knav_acc_open_queue,
	.close_queue	= knav_acc_close_queue,
	.init_range	= knav_acc_init_range,
	.free_range	= knav_acc_free_range,
};
0461 
/**
 * knav_init_acc_range: Initialise accumulator ranges
 *
 * @kdev:	qmss device
 * @node:	device node
 * @range:	qmss range information
 *
 * Return: 0 on success, negative error code otherwise
 */
int knav_init_acc_range(struct knav_device *kdev,
			struct device_node *node,
			struct knav_range_info *range)
{
	struct knav_acc_channel *acc;
	struct knav_pdsp_info *pdsp;
	struct knav_acc_info *info;
	int ret, channel, channels;
	int list_size, mem_size;
	dma_addr_t list_dma;
	void *list_mem;
	u32 config[5];

	range->flags |= RANGE_HAS_ACCUMULATOR;
	info = &range->acc_info;

	/* DT "accumulator" = <pdsp-id start-channel list-entries pacing period> */
	ret = of_property_read_u32_array(node, "accumulator", config, 5);
	if (ret)
		return ret;

	info->pdsp_id       = config[0];
	info->start_channel = config[1];
	info->list_entries  = config[2];
	info->pacing_mode   = config[3];
	info->timer_count   = config[4] / ACC_DEFAULT_PERIOD;

	if (info->start_channel > ACC_MAX_CHANNEL) {
		dev_err(kdev->dev, "channel %d invalid for range %s\n",
			info->start_channel, range->name);
		return -EINVAL;
	}

	if (info->pacing_mode > 3) {
		dev_err(kdev->dev, "pacing mode %d invalid for range %s\n",
			info->pacing_mode, range->name);
		return -EINVAL;
	}

	/* the accumulator range is driven by firmware on the named PDSP */
	pdsp = knav_find_pdsp(kdev, info->pdsp_id);
	if (!pdsp) {
		dev_err(kdev->dev, "pdsp id %d not found for range %s\n",
			info->pdsp_id, range->name);
		return -EINVAL;
	}

	if (!pdsp->started) {
		dev_err(kdev->dev, "pdsp id %d not started for range %s\n",
			info->pdsp_id, range->name);
		return -ENODEV;
	}

	info->pdsp = pdsp;
	channels = range->num_queues;
	if (of_get_property(node, "multi-queue", NULL)) {
		/* one shared channel covering a 32-aligned block of queues */
		range->flags |= RANGE_MULTI_QUEUE;
		channels = 1;
		if (range->queue_base & (32 - 1)) {
			dev_err(kdev->dev,
				"misaligned multi-queue accumulator range %s\n",
				range->name);
			return -EINVAL;
		}
		if (range->num_queues > 32) {
			dev_err(kdev->dev,
				"too many queues in accumulator range %s\n",
				range->name);
			return -EINVAL;
		}
	}

	/* figure out list size */
	list_size  = info->list_entries;
	list_size *= ACC_LIST_ENTRY_WORDS * sizeof(u32);
	info->list_size = list_size;
	/* each channel needs two lists (see list_cpu[0]/[1] below) */
	mem_size   = PAGE_ALIGN(list_size * 2);
	info->mem_size  = mem_size;
	range->acc = devm_kcalloc(kdev->dev, channels, sizeof(*range->acc),
				  GFP_KERNEL);
	if (!range->acc)
		return -ENOMEM;

	/*
	 * NOTE(review): error returns inside this loop do not unwind the
	 * mappings made for earlier channels — presumably the caller frees
	 * them via knav_acc_free_range(); confirm against the caller.
	 */
	for (channel = 0; channel < channels; channel++) {
		acc = range->acc + channel;
		acc->channel = info->start_channel + channel;

		/* allocate memory for the two lists */
		list_mem = alloc_pages_exact(mem_size, GFP_KERNEL | GFP_DMA);
		if (!list_mem)
			return -ENOMEM;

		list_dma = dma_map_single(kdev->dev, list_mem, mem_size,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(kdev->dev, list_dma)) {
			free_pages_exact(list_mem, mem_size);
			return -ENOMEM;
		}

		/* hand the zeroed lists to the device before firmware use */
		memset(list_mem, 0, mem_size);
		dma_sync_single_for_device(kdev->dev, list_dma, mem_size,
					   DMA_TO_DEVICE);
		scnprintf(acc->name, sizeof(acc->name), "hwqueue-acc-%d",
			  acc->channel);
		/* the two lists are back-to-back halves of one mapping */
		acc->list_cpu[0] = list_mem;
		acc->list_cpu[1] = list_mem + list_size;
		acc->list_dma[0] = list_dma;
		acc->list_dma[1] = list_dma + list_size;
		dev_dbg(kdev->dev, "%s: channel %d, dma %pad, virt %8p\n",
			acc->name, acc->channel, &list_dma, list_mem);
	}

	range->ops = &knav_acc_range_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(knav_init_acc_range);