// SPDX-License-Identifier: GPL-2.0-only
/*
 * Keystone Queue Manager subsystem driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 */
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/soc/ti/knav_qmss.h>

#include "knav_qmss.h"

static struct knav_device *kdev;
static DEFINE_MUTEX(knav_dev_lock);
#define knav_dev_lock_held() \
	lockdep_is_held(&knav_dev_lock)

/* Queue manager register indices in DTS */
#define KNAV_QUEUE_PEEK_REG_INDEX	0
#define KNAV_QUEUE_STATUS_REG_INDEX	1
#define KNAV_QUEUE_CONFIG_REG_INDEX	2
#define KNAV_QUEUE_REGION_REG_INDEX	3
#define KNAV_QUEUE_PUSH_REG_INDEX	4
#define KNAV_QUEUE_POP_REG_INDEX	5

/* Queue manager register indices in DTS for QMSS in K2G NAVSS.
 * There are no status and vbusm push registers on this version
 * of QMSS. Push registers are same as pop, so all indices above 1
 * are to be re-assigned.
 */
#define KNAV_L_QUEUE_CONFIG_REG_INDEX	1
#define KNAV_L_QUEUE_REGION_REG_INDEX	2
#define KNAV_L_QUEUE_PUSH_REG_INDEX	3

/* PDSP register indices in DTS */
#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
#define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
#define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
#define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3

#define knav_queue_idx_to_inst(kdev, idx)			\
	(kdev->instances + (idx << kdev->inst_shift))

#define for_each_handle_rcu(qh, inst)				\
	list_for_each_entry_rcu(qh, &inst->handles, list,	\
				knav_dev_lock_held())

#define for_each_instance(idx, inst, kdev)			\
	for (idx = 0, inst = kdev->instances;			\
	     idx < (kdev)->num_queues_in_use;			\
	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))

/* All firmware file names end up here. List the firmware file names below.
 * Newest followed by older ones. Search is done in that order.
 */
const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};

static bool device_ready;
bool knav_qmss_device_ready(void)
{
	return device_ready;
}
EXPORT_SYMBOL_GPL(knav_qmss_device_ready);

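/**
 * knav_queue_notify()	- qmss queue notifier call
 * @inst:		- qmss queue instance like accumulator
 */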
void knav_queue_notify(struct knav_queue_inst *inst)
{
	struct knav_queue *qh;

	if (!inst)
		return;

	rcu_read_lock();
	for_each_handle_rcu(qh, inst) {
		if (atomic_read(&qh->notifier_enabled) <= 0)
			continue;
		if (WARN_ON(!qh->notifier_fn))
			continue;
		this_cpu_inc(qh->stats->notifies);
		qh->notifier_fn(qh->notifier_fn_arg);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(knav_queue_notify);

static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
{
	struct knav_queue_inst *inst = _instdata;

	knav_queue_notify(inst);
	return IRQ_HANDLED;
}

static int knav_queue_setup_irq(struct knav_range_info *range,
				struct knav_queue_inst *inst)
{
	unsigned queue = inst->id - range->queue_base;
	int ret = 0, irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		ret = request_irq(irq, knav_queue_int_handler, 0,
				  inst->irq_name, inst);
		if (ret)
			return ret;
		disable_irq(irq);
		if (range->irqs[queue].cpu_mask) {
			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
			if (ret) {
				dev_warn(range->kdev->dev,
					 "Failed to set IRQ affinity\n");
				return ret;
			}
		}
	}
	return ret;
}

static void knav_queue_free_irq(struct knav_queue_inst *inst)
{
	struct knav_range_info *range = inst->range;
	unsigned queue = inst->id - inst->range->queue_base;
	int irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, inst);
	}
}

static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
{
	return !list_empty(&inst->handles);
}

static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
{
	return inst->range->flags & RANGE_RESERVED;
}

static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
{
	struct knav_queue *tmp;

	rcu_read_lock();
	for_each_handle_rcu(tmp, inst) {
		if (tmp->flags & KNAV_QUEUE_SHARED) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
	return false;
}

static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
					 unsigned type)
{
	if ((type == KNAV_QUEUE_QPEND) &&
	    (inst->range->flags & RANGE_HAS_IRQ)) {
		return true;
	} else if ((type == KNAV_QUEUE_ACC) &&
		   (inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
		return true;
	} else if ((type == KNAV_QUEUE_GP) &&
		   !(inst->range->flags &
		     (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
		return true;
	}
	return false;
}

static inline struct knav_queue_inst *
knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
{
	struct knav_queue_inst *inst;
	int idx;

	for_each_instance(idx, inst, kdev) {
		if (inst->id == id)
			return inst;
	}
	return NULL;
}

static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
{
	if (kdev->base_id <= id &&
	    kdev->base_id + kdev->num_queues > id) {
		id -= kdev->base_id;
		return knav_queue_match_id_to_inst(kdev, id);
	}
	return NULL;
}

static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
					    const char *name, unsigned flags)
{
	struct knav_queue *qh;
	unsigned id;
	int ret = 0;

	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
	if (!qh)
		return ERR_PTR(-ENOMEM);

	qh->stats = alloc_percpu(struct knav_queue_stats);
	if (!qh->stats) {
		ret = -ENOMEM;
		goto err;
	}

	qh->flags = flags;
	qh->inst = inst;
	id = inst->id - inst->qmgr->start_queue;
	qh->reg_push = &inst->qmgr->reg_push[id];
	qh->reg_pop = &inst->qmgr->reg_pop[id];
	qh->reg_peek = &inst->qmgr->reg_peek[id];

	/* first opener? */
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
		if (range->ops && range->ops->open_queue)
			ret = range->ops->open_queue(range, inst, flags);

		if (ret)
			goto err;
	}
	list_add_tail_rcu(&qh->list, &inst->handles);
	return qh;

err:
	if (qh->stats)
		free_percpu(qh->stats);
	devm_kfree(inst->kdev->dev, qh);
	return ERR_PTR(ret);
}

static struct knav_queue *
knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh;

	mutex_lock(&knav_dev_lock);

	qh = ERR_PTR(-ENODEV);
	inst = knav_queue_find_by_id(id);
	if (!inst)
		goto unlock_ret;

	qh = ERR_PTR(-EEXIST);
	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
		goto unlock_ret;

	qh = ERR_PTR(-EBUSY);
	if ((flags & KNAV_QUEUE_SHARED) &&
	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
		goto unlock_ret;

	qh = __knav_queue_open(inst, name, flags);

unlock_ret:
	mutex_unlock(&knav_dev_lock);

	return qh;
}

static struct knav_queue *knav_queue_open_by_type(const char *name,
						  unsigned type, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh = ERR_PTR(-EINVAL);
	int idx;

	mutex_lock(&knav_dev_lock);

	for_each_instance(idx, inst, kdev) {
		if (knav_queue_is_reserved(inst))
			continue;
		if (!knav_queue_match_type(inst, type))
			continue;
		if (knav_queue_is_busy(inst))
			continue;
		qh = __knav_queue_open(inst, name, flags);
		goto unlock_ret;
	}

unlock_ret:
	mutex_unlock(&knav_dev_lock);
	return qh;
}

static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
{
	struct knav_range_info *range = inst->range;

	if (range->ops && range->ops->set_notify)
		range->ops->set_notify(range, inst, enabled);
}

static int knav_queue_enable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool first;

	if (WARN_ON(!qh->notifier_fn))
		return -EINVAL;

	/* adjust the per handle notifier count */
	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
	if (!first)
		return 0; /* nothing to do */

	/* now adjust the per instance notifier count */
	first = (atomic_inc_return(&inst->num_notifiers) == 1);
	if (first)
		knav_queue_set_notify(inst, true);

	return 0;
}

static int knav_queue_disable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool last;

	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
	if (!last)
		return 0; /* nothing to do */

	last = (atomic_dec_return(&inst->num_notifiers) == 0);
	if (last)
		knav_queue_set_notify(inst, false);

	return 0;
}

static int knav_queue_set_notifier(struct knav_queue *qh,
				   struct knav_queue_notify_config *cfg)
{
	knav_queue_notify_fn old_fn = qh->notifier_fn;

	if (!cfg)
		return -EINVAL;

	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
		return -ENOTSUPP;

	if (!cfg->fn && old_fn)
		knav_queue_disable_notifier(qh);

	qh->notifier_fn = cfg->fn;
	qh->notifier_fn_arg = cfg->fn_arg;

	if (cfg->fn && !old_fn)
		knav_queue_enable_notifier(qh);

	return 0;
}

static int knav_gp_set_notify(struct knav_range_info *range,
			      struct knav_queue_inst *inst,
			      bool enabled)
{
	unsigned queue;

	if (range->flags & RANGE_HAS_IRQ) {
		queue = inst->id - range->queue_base;
		if (enabled)
			enable_irq(range->irqs[queue].irq);
		else
			disable_irq_nosync(range->irqs[queue].irq);
	}
	return 0;
}

static int knav_gp_open_queue(struct knav_range_info *range,
			      struct knav_queue_inst *inst, unsigned flags)
{
	return knav_queue_setup_irq(range, inst);
}

static int knav_gp_close_queue(struct knav_range_info *range,
			       struct knav_queue_inst *inst)
{
	knav_queue_free_irq(inst);
	return 0;
}

static struct knav_range_ops knav_gp_range_ops = {
	.set_notify	= knav_gp_set_notify,
	.open_queue	= knav_gp_open_queue,
	.close_queue	= knav_gp_close_queue,
};

static int knav_queue_get_count(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	return readl_relaxed(&qh->reg_peek[0].entry_count) +
		atomic_read(&inst->desc_count);
}

static void knav_queue_debug_show_instance(struct seq_file *s,
					   struct knav_queue_inst *inst)
{
	struct knav_device *kdev = inst->kdev;
	struct knav_queue *qh;
	int cpu = 0;
	int pushes = 0;
	int pops = 0;
	int push_errors = 0;
	int pop_errors = 0;
	int notifies = 0;

	if (!knav_queue_is_busy(inst))
		return;

	seq_printf(s, "\tqueue id %d (%s)\n",
		   kdev->base_id + inst->id, inst->name);
	for_each_handle_rcu(qh, inst) {
		for_each_possible_cpu(cpu) {
			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
			pops += per_cpu_ptr(qh->stats, cpu)->pops;
			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
		}

		seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
			   qh,
			   pushes,
			   pops,
			   knav_queue_get_count(qh),
			   notifies,
			   push_errors,
			   pop_errors);
	}
}

static int knav_queue_debug_show(struct seq_file *s, void *v)
{
	struct knav_queue_inst *inst;
	int idx;

	mutex_lock(&knav_dev_lock);
	seq_printf(s, "%s: %u-%u\n",
		   dev_name(kdev->dev), kdev->base_id,
		   kdev->base_id + kdev->num_queues - 1);
	for_each_instance(idx, inst, kdev)
		knav_queue_debug_show_instance(s, inst);
	mutex_unlock(&knav_dev_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(knav_queue_debug);

static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
				       u32 flags)
{
	unsigned long end;
	u32 val = 0;

	end = jiffies + msecs_to_jiffies(timeout);
	while (time_after(end, jiffies)) {
		val = readl_relaxed(addr);
		if (flags)
			val &= flags;
		if (!val)
			break;
		cpu_relax();
	}
	return val ? -ETIMEDOUT : 0;
}

static int knav_queue_flush(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	unsigned id = inst->id - inst->qmgr->start_queue;

	atomic_set(&inst->desc_count, 0);
	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
	return 0;
}

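/**
 * knav_queue_open()	- open a hardware queue
 * @name:		- name to give the queue handle
 * @id:			- desired queue number if any or specifies the type
 *			  of queue
 * @flags:		- the following flags are applicable to queues:
 *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 *			    exclusive by default.
 *			    Subsequent attempts to open a shared queue should
 *			    also have this flag.
 *
 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 * to check the returned value for error codes.
 */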
void *knav_queue_open(const char *name, unsigned id,
		      unsigned flags)
{
	struct knav_queue *qh = ERR_PTR(-EINVAL);

	switch (id) {
	case KNAV_QUEUE_QPEND:
	case KNAV_QUEUE_ACC:
	case KNAV_QUEUE_GP:
		qh = knav_queue_open_by_type(name, id, flags);
		break;

	default:
		qh = knav_queue_open_by_id(name, id, flags);
		break;
	}
	return qh;
}
EXPORT_SYMBOL_GPL(knav_queue_open);

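/**
 * knav_queue_close()	- close a hardware queue handle
 * @qhandle:		- handle to close
 */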
void knav_queue_close(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	while (atomic_read(&qh->notifier_enabled) > 0)
		knav_queue_disable_notifier(qh);

	mutex_lock(&knav_dev_lock);
	list_del_rcu(&qh->list);
	mutex_unlock(&knav_dev_lock);
	synchronize_rcu();
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		if (range->ops && range->ops->close_queue)
			range->ops->close_queue(range, inst);
	}
	free_percpu(qh->stats);
	devm_kfree(inst->kdev->dev, qh);
}
EXPORT_SYMBOL_GPL(knav_queue_close);

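/**
 * knav_queue_device_control()	- Perform control operations on a queue
 * @qhandle:			- queue handle
 * @cmd:			- control commands
 * @arg:			- command argument
 *
 * Returns 0 on success, errno otherwise.
 */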
int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_notify_config *cfg;
	int ret;

	switch ((int)cmd) {
	case KNAV_QUEUE_GET_ID:
		ret = qh->inst->kdev->base_id + qh->inst->id;
		break;

	case KNAV_QUEUE_FLUSH:
		ret = knav_queue_flush(qh);
		break;

	case KNAV_QUEUE_SET_NOTIFIER:
		cfg = (void *)arg;
		ret = knav_queue_set_notifier(qh, cfg);
		break;

	case KNAV_QUEUE_ENABLE_NOTIFY:
		ret = knav_queue_enable_notifier(qh);
		break;

	case KNAV_QUEUE_DISABLE_NOTIFY:
		ret = knav_queue_disable_notifier(qh);
		break;

	case KNAV_QUEUE_GET_COUNT:
		ret = knav_queue_get_count(qh);
		break;

	default:
		ret = -ENOTSUPP;
		break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(knav_queue_device_control);

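/**
 * knav_queue_push()	- push data (or descriptor) to the tail of a queue
 * @qhandle:		- hardware queue handle
 * @dma:		- DMA data to push
 * @size:		- size of data to push
 * @flags:		- can be used to pass additional information
 *
 * Returns 0 on success, errno otherwise.
 */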
int knav_queue_push(void *qhandle, dma_addr_t dma,
		    unsigned size, unsigned flags)
{
	struct knav_queue *qh = qhandle;
	u32 val;

	val = (u32)dma | ((size / 16) - 1);
	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);

	this_cpu_inc(qh->stats->pushes);
	return 0;
}
EXPORT_SYMBOL_GPL(knav_queue_push);

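/**
 * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
 * @qhandle:		- hardware queue handle
 * @size:		- (optional) size of the data pop'ed.
 *
 * Returns a DMA address on success, 0 on failure.
 */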
dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;
	dma_addr_t dma;
	u32 val, idx;

	/* are we accumulated? */
	if (inst->descs) {
		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
			atomic_inc(&inst->desc_count);
			return 0;
		}
		idx = atomic_inc_return(&inst->desc_head);
		idx &= ACC_DESCS_MASK;
		val = inst->descs[idx];
	} else {
		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
		if (unlikely(!val))
			return 0;
	}

	dma = val & DESC_PTR_MASK;
	if (size)
		*size = ((val & DESC_SIZE_MASK) + 1) * 16;

	this_cpu_inc(qh->stats->pops);
	return dma;
}
EXPORT_SYMBOL_GPL(knav_queue_pop);

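/*
 * Illustrative usage of the queue API above (hypothetical queue name and
 * sizes, not part of the driver): a client typically opens a queue, pushes
 * mapped descriptors and pops them back, e.g.:
 *
 *	void *q = knav_queue_open("rx-q", KNAV_QUEUE_GP, 0);
 *
 *	if (!IS_ERR(q)) {
 *		knav_queue_push(q, dma, 64, 0);
 *		dma = knav_queue_pop(q, &size);
 *		knav_queue_close(q);
 *	}
 */
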
/* carve out descriptors and push into queue */
static void kdesc_fill_pool(struct knav_pool *pool)
{
	struct knav_region *region;
	int i;

	region = pool->region;
	pool->desc_size = region->desc_size;
	for (i = 0; i < pool->num_desc; i++) {
		int index = pool->region_offset + i;
		dma_addr_t dma_addr;
		unsigned dma_size;

		dma_addr = region->dma_start + (region->desc_size * index);
		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
					   DMA_TO_DEVICE);
		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
	}
}

/* pop out descriptors and close the queue */
static void kdesc_empty_pool(struct knav_pool *pool)
{
	dma_addr_t dma;
	unsigned size;
	void *desc;
	int i;

	if (!pool->queue)
		return;

	for (i = 0;; i++) {
		dma = knav_queue_pop(pool->queue, &size);
		if (!dma)
			break;
		desc = knav_pool_desc_dma_to_virt(pool, dma);
		if (!desc) {
			dev_dbg(pool->kdev->dev,
				"couldn't unmap desc, continuing\n");
			continue;
		}
	}
	WARN_ON(i != pool->num_desc);
	knav_queue_close(pool->queue);
}

/* Get the DMA address of a descriptor */
dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
{
	struct knav_pool *pool = ph;

	return pool->region->dma_start + (virt - pool->region->virt_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);

void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
{
	struct knav_pool *pool = ph;

	return pool->region->virt_start + (dma - pool->region->dma_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);

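/**
 * knav_pool_create()	- Create a pool of descriptors
 * @name:		- name to give the pool handle
 * @num_desc:		- numbers of descriptors in the pool
 * @region_id:		- QMSS region id from which the descriptors are to be
 *			  allocated.
 *
 * Returns a pool handle on success.
 * Use IS_ERR_OR_NULL() to identify error values on return.
 */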
void *knav_pool_create(const char *name,
		       int num_desc, int region_id)
{
	struct knav_region *reg_itr, *region = NULL;
	struct knav_pool *pool, *pi = NULL, *iter;
	struct list_head *node;
	unsigned last_offset;
	int ret;

	if (!kdev)
		return ERR_PTR(-EPROBE_DEFER);

	if (!kdev->dev)
		return ERR_PTR(-ENODEV);

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating pool\n");
		return ERR_PTR(-ENOMEM);
	}

	for_each_region(kdev, reg_itr) {
		if (reg_itr->id != region_id)
			continue;
		region = reg_itr;
		break;
	}

	if (!region) {
		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
		ret = -EINVAL;
		goto err;
	}

	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
	if (IS_ERR(pool->queue)) {
		dev_err(kdev->dev,
			"failed to open queue for pool(%s), error %ld\n",
			name, PTR_ERR(pool->queue));
		ret = PTR_ERR(pool->queue);
		goto err;
	}

	pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
	pool->kdev = kdev;
	pool->dev = kdev->dev;

	mutex_lock(&knav_dev_lock);

	if (num_desc > (region->num_desc - region->used_desc)) {
		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
			region_id, name);
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* Region maintains a sorted (by region offset) list of pools
	 * use the first pool that fits or else create a new pool after
	 * the last pool in the region.
	 */
	last_offset = 0;
	node = &region->pools;
	list_for_each_entry(iter, &region->pools, region_inst) {
		if ((iter->region_offset - last_offset) >= num_desc) {
			pi = iter;
			break;
		}
		last_offset = iter->region_offset + iter->num_desc;
	}

	if (pi) {
		node = &pi->region_inst;
		pool->region = region;
		pool->num_desc = num_desc;
		pool->region_offset = last_offset;
		region->used_desc += num_desc;
		list_add_tail(&pool->list, &kdev->pools);
		list_add_tail(&pool->region_inst, node);
	} else {
		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
			name, region_id);
		ret = -ENOMEM;
		goto err_unlock;
	}

	mutex_unlock(&knav_dev_lock);
	kdesc_fill_pool(pool);
	return pool;

err_unlock:
	mutex_unlock(&knav_dev_lock);
err:
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(knav_pool_create);

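/**
 * knav_pool_destroy()	- Free a pool of descriptors
 * @ph:		- pool handle
 */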
void knav_pool_destroy(void *ph)
{
	struct knav_pool *pool = ph;

	if (!pool)
		return;

	if (!pool->region)
		return;

	kdesc_empty_pool(pool);
	mutex_lock(&knav_dev_lock);

	pool->region->used_desc -= pool->num_desc;
	list_del(&pool->region_inst);
	list_del(&pool->list);

	mutex_unlock(&knav_dev_lock);
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
}
EXPORT_SYMBOL_GPL(knav_pool_destroy);

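/**
 * knav_pool_desc_get()	- Get a descriptor from the pool
 * @ph:		- pool handle
 *
 * Returns descriptor from the pool.
 */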
void *knav_pool_desc_get(void *ph)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	unsigned size;
	void *data;

	dma = knav_queue_pop(pool->queue, &size);
	if (unlikely(!dma))
		return ERR_PTR(-ENOMEM);
	data = knav_pool_desc_dma_to_virt(pool, dma);
	return data;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_get);

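/**
 * knav_pool_desc_put()	- return a descriptor to the pool
 * @ph:		- pool handle
 * @desc:	- virtual address
 */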
void knav_pool_desc_put(void *ph, void *desc)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;

	dma = knav_pool_desc_virt_to_dma(pool, desc);
	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_put);

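/**
 * knav_pool_desc_map()	- Map descriptor for DMA transfer
 * @ph:				- pool handle
 * @desc:			- address of descriptor to map
 * @size:			- size of descriptor to map
 * @dma:			- DMA address return pointer
 * @dma_sz:			- adjusted return pointer
 *
 * Returns 0 on success, errno otherwise.
 */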
int knav_pool_desc_map(void *ph, void *desc, unsigned size,
		       dma_addr_t *dma, unsigned *dma_sz)
{
	struct knav_pool *pool = ph;

	*dma = knav_pool_desc_virt_to_dma(pool, desc);
	size = min(size, pool->region->desc_size);
	size = ALIGN(size, SMP_CACHE_BYTES);
	*dma_sz = size;
	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);

	/* Ensure the descriptor reaches to the memory */
	__iowmb();

	return 0;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_map);

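/**
 * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
 * @ph:				- pool handle
 * @dma:			- DMA address of descriptor to unmap
 * @dma_sz:			- size of descriptor to unmap
 *
 * Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
 * error values on return.
 */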
void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
{
	struct knav_pool *pool = ph;
	unsigned desc_sz;
	void *desc;

	desc_sz = min(dma_sz, pool->region->desc_size);
	desc = knav_pool_desc_dma_to_virt(pool, dma);
	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
	prefetch(desc);
	return desc;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);

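/**
 * knav_pool_count()	- Get the number of descriptors in pool.
 * @ph:			- pool handle
 *
 * Returns number of elements in the pool.
 */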
int knav_pool_count(void *ph)
{
	struct knav_pool *pool = ph;

	return knav_queue_get_count(pool->queue);
}
EXPORT_SYMBOL_GPL(knav_pool_count);

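/*
 * Illustrative pool usage (hypothetical names and sizes, not part of the
 * driver): a client creates a pool of descriptors out of a region and then
 * gets/puts individual descriptors, e.g.:
 *
 *	void *pool = knav_pool_create("rx-pool", 1024, 0);
 *
 *	if (!IS_ERR_OR_NULL(pool)) {
 *		void *desc = knav_pool_desc_get(pool);
 *
 *		if (!IS_ERR(desc))
 *			knav_pool_desc_put(pool, desc);
 *		knav_pool_destroy(pool);
 *	}
 */
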
static void knav_queue_setup_region(struct knav_device *kdev,
				    struct knav_region *region)
{
	unsigned hw_num_desc, hw_desc_size, size;
	struct knav_reg_region __iomem *regs;
	struct knav_qmgr_info *qmgr;
	struct knav_pool *pool;
	int id = region->id;
	struct page *page;

	/* unused region? */
	if (!region->num_desc) {
		dev_warn(kdev->dev, "unused region %s\n", region->name);
		return;
	}

	/* get hardware descriptor value */
	hw_num_desc = ilog2(region->num_desc - 1) + 1;

	/* did we force fit ourselves into nothingness? */
	if (region->num_desc < 32) {
		region->num_desc = 0;
		dev_warn(kdev->dev, "too few descriptors in region %s\n",
			 region->name);
		return;
	}

	size = region->num_desc * region->desc_size;
	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
					       GFP_DMA32);
	if (!region->virt_start) {
		region->num_desc = 0;
		dev_err(kdev->dev, "memory alloc failed for region %s\n",
			region->name);
		return;
	}
	region->virt_end = region->virt_start + size;
	page = virt_to_page(region->virt_start);

	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev->dev, region->dma_start)) {
		dev_err(kdev->dev, "dma map failed for region %s\n",
			region->name);
		goto fail;
	}
	region->dma_end = region->dma_start + size;

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
		goto fail;
	}
	pool->num_desc = 0;
	pool->region_offset = region->num_desc;
	list_add(&pool->region_inst, &region->pools);

	dev_dbg(kdev->dev,
		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
		region->name, id, region->desc_size, region->num_desc,
		region->link_index, &region->dma_start, &region->dma_end,
		region->virt_start, region->virt_end);

	hw_desc_size = (region->desc_size / 16) - 1;
	hw_num_desc -= 5;

	for_each_qmgr(kdev, qmgr) {
		regs = qmgr->reg_region + id;
		writel_relaxed((u32)region->dma_start, &regs->base);
		writel_relaxed(region->link_index, &regs->start_index);
		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
			       &regs->size_count);
	}
	return;

fail:
	if (region->dma_start)
		dma_unmap_page(kdev->dev, region->dma_start, size,
			       DMA_BIDIRECTIONAL);
	if (region->virt_start)
		free_pages_exact(region->virt_start, size);
	region->num_desc = 0;
}

static const char *knav_queue_find_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}

static int knav_queue_setup_regions(struct knav_device *kdev,
				    struct device_node *regions)
{
	struct device *dev = kdev->dev;
	struct knav_region *region;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(regions, child) {
		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
		if (!region) {
			of_node_put(child);
			dev_err(dev, "out of memory allocating region\n");
			return -ENOMEM;
		}

		region->name = knav_queue_find_name(child);
		of_property_read_u32(child, "id", &region->id);
		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
		if (!ret) {
			region->num_desc = temp[0];
			region->desc_size = temp[1];
		} else {
			dev_err(dev, "invalid region info %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}

		if (!of_get_property(child, "link-index", NULL)) {
			dev_err(dev, "No link info for %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}
		ret = of_property_read_u32(child, "link-index",
					   &region->link_index);
		if (ret) {
			dev_err(dev, "link index not found for %s\n",
				region->name);
			devm_kfree(dev, region);
			continue;
		}

		INIT_LIST_HEAD(&region->pools);
		list_add_tail(&region->list, &kdev->regions);
	}
	if (list_empty(&kdev->regions)) {
		dev_err(dev, "no valid region information found\n");
		return -ENODEV;
	}

	/* Next, we run through the regions and set things up */
	for_each_region(kdev, region)
		knav_queue_setup_region(kdev, region);

	return 0;
}

static int knav_get_link_ram(struct knav_device *kdev,
			     const char *name,
			     struct knav_link_ram_block *block)
{
	struct platform_device *pdev = to_platform_device(kdev->dev);
	struct device_node *node = pdev->dev.of_node;
	u32 temp[2];

	/*
	 * Note: link ram resources are specified in "entry" sized units. In
	 * reality, although entries are ~40 bits in hardware, we treat them
	 * as 64-bit entities here.
	 *
	 * For example, to specify the internal link ram for Keystone-I class
	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
	 *
	 * For example, to specify the L2 SRAM link ram, we would set the
	 * linkram0 resource to 0x0c000000-0x0c003fff.
	 *
	 * For example, to specify the external link ram, we would set the
	 * linkram0 resource to 0xc0000000-0xc0003fff.
	 *
	 * (link ram size is 0x04000, i.e., 16K entries, 128K bytes)
	 */
	if (!of_property_read_u32_array(node, name, temp, 2)) {
		if (temp[0]) {
			/*
			 * queue_base specified => using internal or onchip
			 * link ram WARNING - we do not "reserve" this block
			 */
			block->dma = (dma_addr_t)temp[0];
			block->virt = NULL;
			block->size = temp[1];
		} else {
			block->size = temp[1];
			/* queue_base not specific => allocate requested size */
			block->virt = dmam_alloc_coherent(kdev->dev,
							  8 * block->size, &block->dma,
							  GFP_KERNEL);
			if (!block->virt) {
				dev_err(kdev->dev, "failed to alloc linkram\n");
				return -ENOMEM;
			}
		}
	} else {
		return -ENODEV;
	}
	return 0;
}

static int knav_queue_setup_link_ram(struct knav_device *kdev)
{
	struct knav_link_ram_block *block;
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		block = &kdev->link_rams[0];
		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
		if (kdev->version == QMSS_66AK2G)
			writel_relaxed(block->size,
				       &qmgr->reg_config->link_ram_size0);
		else
			writel_relaxed(block->size - 1,
				       &qmgr->reg_config->link_ram_size0);
		block++;
		if (!block->size)
			continue;

		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
	}

	return 0;
}

static int knav_setup_queue_range(struct knav_device *kdev,
				  struct device_node *node)
{
	struct device *dev = kdev->dev;
	struct knav_range_info *range;
	struct knav_qmgr_info *qmgr;
	u32 temp[2], start, end, id, index;
	int ret, i;

	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
	if (!range) {
		dev_err(dev, "out of memory allocating range\n");
		return -ENOMEM;
	}

	range->kdev = kdev;
	range->name = knav_queue_find_name(node);
	ret = of_property_read_u32_array(node, "qrange", temp, 2);
	if (!ret) {
		range->queue_base = temp[0] - kdev->base_id;
		range->num_queues = temp[1];
	} else {
		dev_err(dev, "invalid queue range %s\n", range->name);
		devm_kfree(dev, range);
		return -EINVAL;
	}

	for (i = 0; i < RANGE_MAX_IRQS; i++) {
		struct of_phandle_args oirq;

		if (of_irq_parse_one(node, i, &oirq))
			break;

		range->irqs[i].irq = irq_create_of_mapping(&oirq);
		if (range->irqs[i].irq == IRQ_NONE)
			break;

		range->num_irqs++;

		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
			unsigned long mask;
			int bit;

			range->irqs[i].cpu_mask = devm_kzalloc(dev,
							       cpumask_size(), GFP_KERNEL);
			if (!range->irqs[i].cpu_mask)
				return -ENOMEM;

			mask = (oirq.args[2] & 0x0000ff00) >> 8;
			for_each_set_bit(bit, &mask, BITS_PER_LONG)
				cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
		}
	}

	range->num_irqs = min(range->num_irqs, range->num_queues);
	if (range->num_irqs)
		range->flags |= RANGE_HAS_IRQ;

	if (of_get_property(node, "qalloc-by-id", NULL))
		range->flags |= RANGE_RESERVED;

	if (of_get_property(node, "accumulator", NULL)) {
		ret = knav_init_acc_range(kdev, node, range);
		if (ret < 0) {
			devm_kfree(dev, range);
			return ret;
		}
	} else {
		range->ops = &knav_gp_range_ops;
	}

	/* set threshold to 1, and flush out the queues */
	for_each_qmgr(kdev, qmgr) {
		start = max(qmgr->start_queue, range->queue_base);
		end = min(qmgr->start_queue + qmgr->num_queues,
			  range->queue_base + range->num_queues);
		for (id = start; id < end; id++) {
			index = id - qmgr->start_queue;
			writel_relaxed(THRESH_GTE | 1,
				       &qmgr->reg_peek[index].ptr_size_thresh);
			writel_relaxed(0,
				       &qmgr->reg_push[index].ptr_size_thresh);
		}
	}

	list_add_tail(&range->list, &kdev->queue_ranges);
	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
		range->name, range->queue_base,
		range->queue_base + range->num_queues - 1,
		range->num_irqs,
		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
	kdev->num_queues_in_use += range->num_queues;
	return 0;
}

static int knav_setup_queue_pools(struct knav_device *kdev,
				  struct device_node *queue_pools)
{
	struct device_node *type, *range;

	for_each_child_of_node(queue_pools, type) {
		for_each_child_of_node(type, range) {
			/* return value ignored, we init the rest... */
			knav_setup_queue_range(kdev, range);
		}
	}

	/* ... and barf if they all failed! */
	if (list_empty(&kdev->queue_ranges)) {
		dev_err(kdev->dev, "no valid queue range found\n");
		return -ENODEV;
	}
	return 0;
}

static void knav_free_queue_range(struct knav_device *kdev,
				  struct knav_range_info *range)
{
	if (range->ops && range->ops->free_range)
		range->ops->free_range(range);
	list_del(&range->list);
	devm_kfree(kdev->dev, range);
}

static void knav_free_queue_ranges(struct knav_device *kdev)
{
	struct knav_range_info *range;

	for (;;) {
		range = first_queue_range(kdev);
		if (!range)
			break;
		knav_free_queue_range(kdev, range);
	}
}

static void knav_queue_free_regions(struct knav_device *kdev)
{
	struct knav_region *region;
	struct knav_pool *pool, *tmp;
	unsigned size;

	for (;;) {
		region = first_region(kdev);
		if (!region)
			break;
		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
			knav_pool_destroy(pool);

		size = region->virt_end - region->virt_start;
		if (size)
			free_pages_exact(region->virt_start, size);
		list_del(&region->list);
		devm_kfree(kdev->dev, region);
	}
}

static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
					struct device_node *node, int index)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, index, &res);
	if (ret) {
		dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
			node, index);
		return ERR_PTR(ret);
	}

	regs = devm_ioremap_resource(kdev->dev, &res);
	if (IS_ERR(regs))
		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
			index, node);
	return regs;
}

static int knav_queue_init_qmgrs(struct knav_device *kdev,
				 struct device_node *qmgrs)
{
	struct device *dev = kdev->dev;
	struct knav_qmgr_info *qmgr;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(qmgrs, child) {
		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
		if (!qmgr) {
			of_node_put(child);
			dev_err(dev, "out of memory allocating qmgr\n");
			return -ENOMEM;
		}

		ret = of_property_read_u32_array(child, "managed-queues",
						 temp, 2);
		if (!ret) {
			qmgr->start_queue = temp[0];
			qmgr->num_queues = temp[1];
		} else {
			dev_err(dev, "invalid qmgr queue range\n");
			devm_kfree(dev, qmgr);
			continue;
		}

		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
			 qmgr->start_queue, qmgr->num_queues);

		qmgr->reg_peek =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PEEK_REG_INDEX);

		if (kdev->version == QMSS) {
			qmgr->reg_status =
				knav_queue_map_reg(kdev, child,
						   KNAV_QUEUE_STATUS_REG_INDEX);
		}

		qmgr->reg_config =
			knav_queue_map_reg(kdev, child,
					   (kdev->version == QMSS_66AK2G) ?
					   KNAV_L_QUEUE_CONFIG_REG_INDEX :
					   KNAV_QUEUE_CONFIG_REG_INDEX);
		qmgr->reg_region =
			knav_queue_map_reg(kdev, child,
					   (kdev->version == QMSS_66AK2G) ?
					   KNAV_L_QUEUE_REGION_REG_INDEX :
					   KNAV_QUEUE_REGION_REG_INDEX);

		qmgr->reg_push =
			knav_queue_map_reg(kdev, child,
					   (kdev->version == QMSS_66AK2G) ?
					   KNAV_L_QUEUE_PUSH_REG_INDEX :
					   KNAV_QUEUE_PUSH_REG_INDEX);

		if (kdev->version == QMSS) {
			qmgr->reg_pop =
				knav_queue_map_reg(kdev, child,
						   KNAV_QUEUE_POP_REG_INDEX);
		}

		if (IS_ERR(qmgr->reg_peek) ||
		    ((kdev->version == QMSS) &&
		     (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
		    IS_ERR(qmgr->reg_push)) {
			dev_err(dev, "failed to map qmgr regs\n");
			if (kdev->version == QMSS) {
				if (!IS_ERR(qmgr->reg_status))
					devm_iounmap(dev, qmgr->reg_status);
				if (!IS_ERR(qmgr->reg_pop))
					devm_iounmap(dev, qmgr->reg_pop);
			}
			if (!IS_ERR(qmgr->reg_peek))
				devm_iounmap(dev, qmgr->reg_peek);
			if (!IS_ERR(qmgr->reg_config))
				devm_iounmap(dev, qmgr->reg_config);
			if (!IS_ERR(qmgr->reg_region))
				devm_iounmap(dev, qmgr->reg_region);
			if (!IS_ERR(qmgr->reg_push))
				devm_iounmap(dev, qmgr->reg_push);
			devm_kfree(dev, qmgr);
			continue;
		}

		/* Pop is same as push register */
		if (kdev->version == QMSS_66AK2G)
			qmgr->reg_pop = qmgr->reg_push;

		list_add_tail(&qmgr->list, &kdev->qmgrs);
		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
			 qmgr->start_queue, qmgr->num_queues,
			 qmgr->reg_peek, qmgr->reg_status,
			 qmgr->reg_config, qmgr->reg_region,
			 qmgr->reg_push, qmgr->reg_pop);
	}
	return 0;
}

static int knav_queue_init_pdsps(struct knav_device *kdev,
				 struct device_node *pdsps)
{
	struct device *dev = kdev->dev;
	struct knav_pdsp_info *pdsp;
	struct device_node *child;

	for_each_child_of_node(pdsps, child) {
		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
		if (!pdsp) {
			of_node_put(child);
			dev_err(dev, "out of memory allocating pdsp\n");
			return -ENOMEM;
		}
		pdsp->name = knav_queue_find_name(child);
		pdsp->iram =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
		pdsp->regs =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
		pdsp->intd =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
		pdsp->command =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);

		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
			dev_err(dev, "failed to map pdsp %s regs\n",
				pdsp->name);
			if (!IS_ERR(pdsp->command))
				devm_iounmap(dev, pdsp->command);
			if (!IS_ERR(pdsp->iram))
				devm_iounmap(dev, pdsp->iram);
			if (!IS_ERR(pdsp->regs))
				devm_iounmap(dev, pdsp->regs);
			if (!IS_ERR(pdsp->intd))
				devm_iounmap(dev, pdsp->intd);
			devm_kfree(dev, pdsp);
			continue;
		}
		of_property_read_u32(child, "id", &pdsp->id);
		list_add_tail(&pdsp->list, &kdev->pdsps);
		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
			pdsp->intd);
	}
	return 0;
}

static int knav_queue_stop_pdsp(struct knav_device *kdev,
				struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);
	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
				   PDSP_CTRL_RUNNING);
	if (ret < 0) {
		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
		return ret;
	}
	pdsp->loaded = false;
	pdsp->started = false;
	return 0;
}

static int knav_queue_load_pdsp(struct knav_device *kdev,
				struct knav_pdsp_info *pdsp)
{
	int i, ret, fwlen;
	const struct firmware *fw;
	bool found = false;
	u32 *fwdata;

	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
		if (knav_acc_firmwares[i]) {
			ret = request_firmware_direct(&fw,
						      knav_acc_firmwares[i],
						      kdev->dev);
			if (!ret) {
				found = true;
				break;
			}
		}
	}

	if (!found) {
		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
		return -ENODEV;
	}

	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
		 knav_acc_firmwares[i]);

	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);

	fwdata = (u32 *)fw->data;
	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
	for (i = 0; i < fwlen; i++)
		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);

	release_firmware(fw);
	return 0;
}

static int knav_queue_start_pdsp(struct knav_device *kdev,
				 struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	/* write a command for sync */
	writel_relaxed(0xffffffff, pdsp->command);
	while (readl_relaxed(pdsp->command) != 0xffffffff)
		cpu_relax();

	/* soft reset the PDSP */
	val = readl_relaxed(&pdsp->regs->control);
	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
	writel_relaxed(val, &pdsp->regs->control);

	/* enable pdsp */
	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);

	/* wait for command register to clear */
	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
	if (ret < 0) {
		dev_err(kdev->dev,
			"timed out on pdsp %s command register wait\n",
			pdsp->name);
		return ret;
	}
	return 0;
}

static void knav_queue_stop_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;

	/* disable all pdsps */
	for_each_pdsp(kdev, pdsp)
		knav_queue_stop_pdsp(kdev, pdsp);
}

static int knav_queue_start_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;
	int ret;

	knav_queue_stop_pdsps(kdev);

	/* now load them all. We return success even if pdsp
	 * is not loaded as acc channels are optional on having
	 * firmware availability in the system. We set the loaded
	 * and started flag and when initialize the acc range, check
	 * it and init the range only if pdsp is started.
	 */
	for_each_pdsp(kdev, pdsp) {
		ret = knav_queue_load_pdsp(kdev, pdsp);
		if (!ret)
			pdsp->loaded = true;
	}

	for_each_pdsp(kdev, pdsp) {
		if (pdsp->loaded) {
			ret = knav_queue_start_pdsp(kdev, pdsp);
			if (!ret)
				pdsp->started = true;
		}
	}
	return 0;
}

static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
{
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		if ((id >= qmgr->start_queue) &&
		    (id < qmgr->start_queue + qmgr->num_queues))
			return qmgr;
	}
	return NULL;
}

static int knav_queue_init_queue(struct knav_device *kdev,
				 struct knav_range_info *range,
				 struct knav_queue_inst *inst,
				 unsigned id)
{
	char irq_name[KNAV_NAME_SIZE];

	inst->qmgr = knav_find_qmgr(id);
	if (!inst->qmgr)
		return -1;

	INIT_LIST_HEAD(&inst->handles);
	inst->kdev = kdev;
	inst->range = range;
	inst->irq_num = -1;
	inst->id = id;
	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);

	if (range->ops && range->ops->init_queue)
		return range->ops->init_queue(range, inst);
	else
		return 0;
}

static int knav_queue_init_queues(struct knav_device *kdev)
{
	struct knav_range_info *range;
	int size, id, base_idx;
	int idx = 0, ret = 0;

	/* how much do we need for instance data? */
	size = sizeof(struct knav_queue_inst);

	/* round this up to a power of 2, keep the index to instance
	 * arithmetic fast.
	 */
	kdev->inst_shift = order_base_2(size);
	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
	if (!kdev->instances)
		return -ENOMEM;

	for_each_queue_range(kdev, range) {
		if (range->ops && range->ops->init_range)
			range->ops->init_range(range);
		base_idx = idx;
		for (id = range->queue_base;
		     id < range->queue_base + range->num_queues; id++, idx++) {
			ret = knav_queue_init_queue(kdev, range,
						    knav_queue_idx_to_inst(kdev, idx), id);
			if (ret < 0)
				return ret;
		}
		range->queue_base_inst =
			knav_queue_idx_to_inst(kdev, base_idx);
	}
	return 0;
}

static const struct of_device_id keystone_qmss_of_match[] = {
	{
		.compatible = "ti,keystone-navigator-qmss",
	},
	{
		.compatible = "ti,66ak2g-navss-qm",
		.data	= (void *)QMSS_66AK2G,
	},
	{},
};
MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);

static int knav_queue_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	u32 temp[2];
	int ret;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
	if (!kdev) {
		dev_err(dev, "memory allocation failed\n");
		return -ENOMEM;
	}

	match = of_match_device(of_match_ptr(keystone_qmss_of_match), dev);
	if (match && match->data)
		kdev->version = QMSS_66AK2G;

	platform_set_drvdata(pdev, kdev);
	kdev->dev = dev;
	INIT_LIST_HEAD(&kdev->queue_ranges);
	INIT_LIST_HEAD(&kdev->qmgrs);
	INIT_LIST_HEAD(&kdev->pools);
	INIT_LIST_HEAD(&kdev->regions);
	INIT_LIST_HEAD(&kdev->pdsps);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable QMSS\n");
		return ret;
	}

	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
		dev_err(dev, "queue-range not specified\n");
		ret = -ENODEV;
		goto err;
	}
	kdev->base_id = temp[0];
	kdev->num_queues = temp[1];

	/* Initialize queue managers using device tree configuration */
	qmgrs = of_get_child_by_name(node, "qmgrs");
	if (!qmgrs) {
		dev_err(dev, "queue manager info not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_init_qmgrs(kdev, qmgrs);
	of_node_put(qmgrs);
	if (ret)
		goto err;

	/* get pdsp configuration values from device tree */
	pdsps = of_get_child_by_name(node, "pdsps");
	if (pdsps) {
		ret = knav_queue_init_pdsps(kdev, pdsps);
		if (ret)
			goto err;

		ret = knav_queue_start_pdsps(kdev);
		if (ret)
			goto err;
	}
	of_node_put(pdsps);

	/* get usable queue range values from device tree */
	queue_pools = of_get_child_by_name(node, "queue-pools");
	if (!queue_pools) {
		dev_err(dev, "queue-pools not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_setup_queue_pools(kdev, queue_pools);
	of_node_put(queue_pools);
	if (ret)
		goto err;

	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
	if (ret) {
		dev_err(kdev->dev, "could not setup linking ram\n");
		goto err;
	}

	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
	if (ret) {
		/*
		 * nothing really, we have one linking ram already
		 */
	}

	ret = knav_queue_setup_link_ram(kdev);
	if (ret)
		goto err;

	regions = of_get_child_by_name(node, "descriptor-regions");
	if (!regions) {
		dev_err(dev, "descriptor-regions not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_setup_regions(kdev, regions);
	of_node_put(regions);
	if (ret)
		goto err;

	ret = knav_queue_init_queues(kdev);
	if (ret < 0) {
		dev_err(dev, "hwqueue initialization failed\n");
		goto err;
	}

	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
			    &knav_queue_debug_fops);
	device_ready = true;
	return 0;

err:
	knav_queue_stop_pdsps(kdev);
	knav_queue_free_regions(kdev);
	knav_free_queue_ranges(kdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int knav_queue_remove(struct platform_device *pdev)
{
	/* TODO: Free resources */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct platform_driver keystone_qmss_driver = {
	.probe	= knav_queue_probe,
	.remove	= knav_queue_remove,
	.driver	= {
		.name	= "keystone-navigator-qmss",
		.of_match_table = keystone_qmss_of_match,
	},
};
module_platform_driver(keystone_qmss_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");