0001
0002
0003
0004
0005
0006
0007
0008
0009 #define pr_fmt(fmt) "%s: " fmt, __func__
0010
0011 #include <linux/device.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/io.h>
0014 #include <linux/iopoll.h>
0015 #include <linux/kernel.h>
0016 #include <linux/mailbox_controller.h>
0017 #include <linux/module.h>
0018 #include <linux/of_device.h>
0019 #include <linux/of.h>
0020 #include <linux/of_irq.h>
0021 #include <linux/platform_device.h>
0022 #include <linux/soc/ti/ti-msgmgr.h>
0023
/* Message Manager: per-proxy, per-queue data register layout */
#define Q_DATA_OFFSET(proxy, queue, reg)	\
	((0x10000 * (proxy)) + (0x80 * (queue)) + ((reg) * 4))
/* Message Manager: per-queue state/debug register layout */
#define Q_STATE_OFFSET(queue)			((queue) * 0x4)
#define Q_STATE_ENTRY_COUNT_MASK		(0xFFF000)

/* Secure Proxy: per-thread register layout */
#define SPROXY_THREAD_OFFSET(tid)		(0x1000 * (tid))
#define SPROXY_THREAD_DATA_OFFSET(tid, reg)	\
	(SPROXY_THREAD_OFFSET(tid) + ((reg) * 0x4) + 0x4)

#define SPROXY_THREAD_STATUS_OFFSET(tid)	(SPROXY_THREAD_OFFSET(tid))

#define SPROXY_THREAD_STATUS_COUNT_MASK		(0xFF)

#define SPROXY_THREAD_CTRL_OFFSET(tid)		(0x1000 + SPROXY_THREAD_OFFSET(tid))
/*
 * Fix: use an unsigned constant. (0x1 << 31) left-shifts into the sign
 * bit of a signed int, which is undefined behavior in C.
 */
#define SPROXY_THREAD_CTRL_DIR_MASK		(0x1U << 31)
0039
0040
0041
0042
0043
0044
0045
/**
 * struct ti_msgmgr_valid_queue_desc - SoC-valid queue descriptor
 * @queue_id:	Queue number for this path
 * @proxy_id:	Proxy identifier representing the processor in the SoC
 * @is_tx:	true if this is a transmit queue, false for receive
 */
struct ti_msgmgr_valid_queue_desc {
	u8 queue_id;
	u8 proxy_id;
	bool is_tx;
};
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
/**
 * struct ti_msgmgr_desc - Device-specific message manager description
 * @queue_count:	Number of hardware queues/threads available
 * @max_message_size:	Maximum message size in bytes
 * @max_messages:	Maximum number of queued messages
 * @data_first_reg:	First data register index in the proxy data region
 * @data_last_reg:	Last data register index in the proxy data region
 * @status_cnt_mask:	Mask extracting the message count from queue state
 * @status_err_mask:	Mask extracting error bits from queue state
 *			(only consulted for secure-proxy devices)
 * @tx_polled:		true if TX-done must be polled by the mailbox core
 * @tx_poll_timeout_ms:	Poll period in ms when @tx_polled is set
 * @valid_queues:	List of queues the processor may access (non-sproxy)
 * @data_region_name:	Resource name of the proxy data region
 * @status_region_name:	Resource name of the proxy status region
 * @ctrl_region_name:	Resource name of the proxy control region (sproxy)
 * @num_valid_queues:	Number of entries in @valid_queues (or number of
 *			threads for secure proxy)
 * @is_sproxy:		true if this is a Secure Proxy instance
 */
struct ti_msgmgr_desc {
	u8 queue_count;
	u8 max_message_size;
	u8 max_messages;
	u8 data_first_reg;
	u8 data_last_reg;
	u32 status_cnt_mask;
	u32 status_err_mask;
	bool tx_polled;
	int tx_poll_timeout_ms;
	const struct ti_msgmgr_valid_queue_desc *valid_queues;
	const char *data_region_name;
	const char *status_region_name;
	const char *ctrl_region_name;
	int num_valid_queues;
	bool is_sproxy;
};
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
/**
 * struct ti_queue_inst - Per-queue (or per-thread) runtime state
 * @name:		Queue name used for IRQ registration and messages
 * @queue_id:		Queue identifier as mapped on the SoC
 * @proxy_id:		Proxy (or sproxy thread) identifier
 * @irq:		RX IRQ number (-EINVAL until looked up lazily)
 * @is_tx:		true if this is a transmit queue
 * @queue_buff_start:	First data buffer register (iomapped)
 * @queue_buff_end:	Last data buffer register; writing/reading it
 *			completes a message transfer
 * @queue_state:	Queue status register (count/error fields)
 * @queue_ctrl:		Queue control register (sproxy direction bit)
 * @chan:		Associated mailbox channel
 * @rx_buff:		Bounce buffer of max_message_size, allocated at
 *			channel startup for RX queues
 * @polled_rx_mode:	true when RX is drained by polling (IRQ disabled)
 */
struct ti_queue_inst {
	char name[30];
	u8 queue_id;
	u8 proxy_id;
	int irq;
	bool is_tx;
	void __iomem *queue_buff_start;
	void __iomem *queue_buff_end;
	void __iomem *queue_state;
	void __iomem *queue_ctrl;
	struct mbox_chan *chan;
	u32 *rx_buff;
	bool polled_rx_mode;
};
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
/**
 * struct ti_msgmgr_inst - Driver instance state
 * @dev:			Backing platform device
 * @desc:			Matched device description
 * @queue_proxy_region:		Mapped proxy data region
 * @queue_state_debug_region:	Mapped queue status region
 * @queue_ctrl_region:		Mapped control region (secure proxy only)
 * @num_valid_queues:		Number of usable queues/channels
 * @qinsts:			Array of @num_valid_queues queue instances
 * @mbox:			Registered mailbox controller
 * @chans:			Array of @num_valid_queues mailbox channels
 */
struct ti_msgmgr_inst {
	struct device *dev;
	const struct ti_msgmgr_desc *desc;
	void __iomem *queue_proxy_region;
	void __iomem *queue_state_debug_region;
	void __iomem *queue_ctrl_region;
	u8 num_valid_queues;
	struct ti_queue_inst *qinsts;
	struct mbox_controller mbox;
	struct mbox_chan *chans;
};
0146
0147
0148
0149
0150
0151
0152
0153
0154 static inline int
0155 ti_msgmgr_queue_get_num_messages(const struct ti_msgmgr_desc *d,
0156 struct ti_queue_inst *qinst)
0157 {
0158 u32 val;
0159 u32 status_cnt_mask = d->status_cnt_mask;
0160
0161
0162
0163
0164
0165 val = readl(qinst->queue_state) & status_cnt_mask;
0166 val >>= __ffs(status_cnt_mask);
0167
0168 return val;
0169 }
0170
0171
0172
0173
0174
0175
0176
0177
0178 static inline bool ti_msgmgr_queue_is_error(const struct ti_msgmgr_desc *d,
0179 struct ti_queue_inst *qinst)
0180 {
0181 u32 val;
0182
0183
0184 if (!d->is_sproxy)
0185 return false;
0186
0187
0188
0189
0190
0191 val = readl(qinst->queue_state) & d->status_err_mask;
0192
0193 return val ? true : false;
0194 }
0195
0196 static int ti_msgmgr_queue_rx_data(struct mbox_chan *chan, struct ti_queue_inst *qinst,
0197 const struct ti_msgmgr_desc *desc)
0198 {
0199 int num_words;
0200 struct ti_msgmgr_message message;
0201 void __iomem *data_reg;
0202 u32 *word_data;
0203
0204
0205
0206
0207
0208
0209
0210 message.len = desc->max_message_size;
0211 message.buf = (u8 *)qinst->rx_buff;
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227 for (data_reg = qinst->queue_buff_start, word_data = qinst->rx_buff,
0228 num_words = (desc->max_message_size / sizeof(u32));
0229 num_words; num_words--, data_reg += sizeof(u32), word_data++)
0230 *word_data = readl(data_reg);
0231
0232
0233
0234
0235
0236
0237
0238 mbox_chan_received_data(chan, (void *)&message);
0239
0240 return 0;
0241 }
0242
0243 static int ti_msgmgr_queue_rx_poll_timeout(struct mbox_chan *chan, int timeout_us)
0244 {
0245 struct device *dev = chan->mbox->dev;
0246 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
0247 struct ti_queue_inst *qinst = chan->con_priv;
0248 const struct ti_msgmgr_desc *desc = inst->desc;
0249 int msg_count;
0250 int ret;
0251
0252 ret = readl_poll_timeout_atomic(qinst->queue_state, msg_count,
0253 (msg_count & desc->status_cnt_mask),
0254 10, timeout_us);
0255 if (ret != 0)
0256 return ret;
0257
0258 ti_msgmgr_queue_rx_data(chan, qinst, desc);
0259
0260 return 0;
0261 }
0262
0263
0264
0265
0266
0267
0268
0269
0270
0271
0272 static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
0273 {
0274 struct mbox_chan *chan = p;
0275 struct device *dev = chan->mbox->dev;
0276 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
0277 struct ti_queue_inst *qinst = chan->con_priv;
0278 const struct ti_msgmgr_desc *desc;
0279 int msg_count;
0280
0281 if (WARN_ON(!inst)) {
0282 dev_err(dev, "no platform drv data??\n");
0283 return -EINVAL;
0284 }
0285
0286
0287 if (qinst->is_tx) {
0288 dev_err(dev, "Cannot handle rx interrupt on tx channel %s\n",
0289 qinst->name);
0290 return IRQ_NONE;
0291 }
0292
0293 desc = inst->desc;
0294 if (ti_msgmgr_queue_is_error(desc, qinst)) {
0295 dev_err(dev, "Error on Rx channel %s\n", qinst->name);
0296 return IRQ_NONE;
0297 }
0298
0299
0300 msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
0301 if (!msg_count) {
0302
0303 dev_dbg(dev, "Spurious event - 0 pending data!\n");
0304 return IRQ_NONE;
0305 }
0306
0307 ti_msgmgr_queue_rx_data(chan, qinst, desc);
0308
0309 return IRQ_HANDLED;
0310 }
0311
0312
0313
0314
0315
0316
0317
0318 static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan)
0319 {
0320 struct ti_queue_inst *qinst = chan->con_priv;
0321 struct device *dev = chan->mbox->dev;
0322 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
0323 const struct ti_msgmgr_desc *desc = inst->desc;
0324 int msg_count;
0325
0326 if (qinst->is_tx)
0327 return false;
0328
0329 if (ti_msgmgr_queue_is_error(desc, qinst)) {
0330 dev_err(dev, "Error on channel %s\n", qinst->name);
0331 return false;
0332 }
0333
0334 msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
0335
0336 return msg_count ? true : false;
0337 }
0338
0339
0340
0341
0342
0343
0344
0345 static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan)
0346 {
0347 struct ti_queue_inst *qinst = chan->con_priv;
0348 struct device *dev = chan->mbox->dev;
0349 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
0350 const struct ti_msgmgr_desc *desc = inst->desc;
0351 int msg_count;
0352
0353 if (!qinst->is_tx)
0354 return false;
0355
0356 if (ti_msgmgr_queue_is_error(desc, qinst)) {
0357 dev_err(dev, "Error on channel %s\n", qinst->name);
0358 return false;
0359 }
0360
0361 msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
0362
0363 if (desc->is_sproxy) {
0364
0365 return msg_count ? true : false;
0366 }
0367
0368
0369 return msg_count ? false : true;
0370 }
0371
0372 static bool ti_msgmgr_chan_has_polled_queue_rx(struct mbox_chan *chan)
0373 {
0374 struct ti_queue_inst *qinst;
0375
0376 if (!chan)
0377 return false;
0378
0379 qinst = chan->con_priv;
0380 return qinst->polled_rx_mode;
0381 }
0382
0383
0384
0385
0386
0387
0388
0389
0390 static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
0391 {
0392 struct device *dev = chan->mbox->dev;
0393 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
0394 const struct ti_msgmgr_desc *desc;
0395 struct ti_queue_inst *qinst = chan->con_priv;
0396 int num_words, trail_bytes;
0397 struct ti_msgmgr_message *message = data;
0398 void __iomem *data_reg;
0399 u32 *word_data;
0400 int ret = 0;
0401
0402 if (WARN_ON(!inst)) {
0403 dev_err(dev, "no platform drv data??\n");
0404 return -EINVAL;
0405 }
0406 desc = inst->desc;
0407
0408 if (ti_msgmgr_queue_is_error(desc, qinst)) {
0409 dev_err(dev, "Error on channel %s\n", qinst->name);
0410 return false;
0411 }
0412
0413 if (desc->max_message_size < message->len) {
0414 dev_err(dev, "Queue %s message length %zu > max %d\n",
0415 qinst->name, message->len, desc->max_message_size);
0416 return -EINVAL;
0417 }
0418
0419
0420 for (data_reg = qinst->queue_buff_start,
0421 num_words = message->len / sizeof(u32),
0422 word_data = (u32 *)message->buf;
0423 num_words; num_words--, data_reg += sizeof(u32), word_data++)
0424 writel(*word_data, data_reg);
0425
0426 trail_bytes = message->len % sizeof(u32);
0427 if (trail_bytes) {
0428 u32 data_trail = *word_data;
0429
0430
0431 data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes));
0432 writel(data_trail, data_reg);
0433 data_reg++;
0434 }
0435
0436
0437
0438
0439 if (data_reg <= qinst->queue_buff_end)
0440 writel(0, qinst->queue_buff_end);
0441
0442
0443 if (ti_msgmgr_chan_has_polled_queue_rx(message->chan_rx))
0444 ret = ti_msgmgr_queue_rx_poll_timeout(message->chan_rx,
0445 message->timeout_rx_ms * 1000);
0446
0447 return ret;
0448 }
0449
0450
0451
0452
0453
0454
0455
0456
0457 static int ti_msgmgr_queue_rx_irq_req(struct device *dev,
0458 const struct ti_msgmgr_desc *d,
0459 struct ti_queue_inst *qinst,
0460 struct mbox_chan *chan)
0461 {
0462 int ret = 0;
0463 char of_rx_irq_name[7];
0464 struct device_node *np;
0465
0466 snprintf(of_rx_irq_name, sizeof(of_rx_irq_name),
0467 "rx_%03d", d->is_sproxy ? qinst->proxy_id : qinst->queue_id);
0468
0469
0470 if (qinst->irq < 0) {
0471 np = of_node_get(dev->of_node);
0472 if (!np)
0473 return -ENODATA;
0474 qinst->irq = of_irq_get_byname(np, of_rx_irq_name);
0475 of_node_put(np);
0476
0477 if (qinst->irq < 0) {
0478 dev_err(dev,
0479 "QID %d PID %d:No IRQ[%s]: %d\n",
0480 qinst->queue_id, qinst->proxy_id,
0481 of_rx_irq_name, qinst->irq);
0482 return qinst->irq;
0483 }
0484 }
0485
0486
0487 ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt,
0488 IRQF_SHARED, qinst->name, chan);
0489 if (ret) {
0490 dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n",
0491 qinst->irq, qinst->name, ret);
0492 }
0493
0494 return ret;
0495 }
0496
0497
0498
0499
0500
0501
0502
0503 static int ti_msgmgr_queue_startup(struct mbox_chan *chan)
0504 {
0505 struct device *dev = chan->mbox->dev;
0506 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
0507 struct ti_queue_inst *qinst = chan->con_priv;
0508 const struct ti_msgmgr_desc *d = inst->desc;
0509 int ret;
0510 int msg_count;
0511
0512
0513
0514
0515
0516 if (d->is_sproxy) {
0517 qinst->is_tx = (readl(qinst->queue_ctrl) &
0518 SPROXY_THREAD_CTRL_DIR_MASK) ? false : true;
0519
0520 msg_count = ti_msgmgr_queue_get_num_messages(d, qinst);
0521
0522 if (!msg_count && qinst->is_tx) {
0523 dev_err(dev, "%s: Cannot transmit with 0 credits!\n",
0524 qinst->name);
0525 return -EINVAL;
0526 }
0527 }
0528
0529 if (!qinst->is_tx) {
0530
0531 qinst->rx_buff = kzalloc(d->max_message_size, GFP_KERNEL);
0532 if (!qinst->rx_buff)
0533 return -ENOMEM;
0534
0535 ret = ti_msgmgr_queue_rx_irq_req(dev, d, qinst, chan);
0536 if (ret) {
0537 kfree(qinst->rx_buff);
0538 return ret;
0539 }
0540 }
0541
0542 return 0;
0543 }
0544
0545
0546
0547
0548
0549 static void ti_msgmgr_queue_shutdown(struct mbox_chan *chan)
0550 {
0551 struct ti_queue_inst *qinst = chan->con_priv;
0552
0553 if (!qinst->is_tx) {
0554 free_irq(qinst->irq, chan);
0555 kfree(qinst->rx_buff);
0556 }
0557 }
0558
0559
0560
0561
0562
0563
0564
0565
0566
0567 static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
0568 const struct of_phandle_args *p)
0569 {
0570 struct ti_msgmgr_inst *inst;
0571 int req_qid, req_pid;
0572 struct ti_queue_inst *qinst;
0573 const struct ti_msgmgr_desc *d;
0574 int i, ncells;
0575
0576 inst = container_of(mbox, struct ti_msgmgr_inst, mbox);
0577 if (WARN_ON(!inst))
0578 return ERR_PTR(-EINVAL);
0579
0580 d = inst->desc;
0581
0582 if (d->is_sproxy)
0583 ncells = 1;
0584 else
0585 ncells = 2;
0586 if (p->args_count != ncells) {
0587 dev_err(inst->dev, "Invalid arguments in dt[%d]. Must be %d\n",
0588 p->args_count, ncells);
0589 return ERR_PTR(-EINVAL);
0590 }
0591 if (ncells == 1) {
0592 req_qid = 0;
0593 req_pid = p->args[0];
0594 } else {
0595 req_qid = p->args[0];
0596 req_pid = p->args[1];
0597 }
0598
0599 if (d->is_sproxy) {
0600 if (req_pid >= d->num_valid_queues)
0601 goto err;
0602 qinst = &inst->qinsts[req_pid];
0603 return qinst->chan;
0604 }
0605
0606 for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues;
0607 i++, qinst++) {
0608 if (req_qid == qinst->queue_id && req_pid == qinst->proxy_id)
0609 return qinst->chan;
0610 }
0611
0612 err:
0613 dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %pOFn\n",
0614 req_qid, req_pid, p->np);
0615 return ERR_PTR(-ENOENT);
0616 }
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
/*
 * ti_msgmgr_queue_setup() - Initialize one queue/thread instance
 * @idx:	Index of the queue (for diagnostics)
 * @dev:	Message manager device
 * @np:		Device OF node (unused here; IRQs are resolved lazily)
 * @inst:	Driver instance providing the mapped regions
 * @d:		Device description data
 * @qd:		Queue description (queue/proxy ids, direction)
 * @qinst:	Queue instance to populate
 * @chan:	Mailbox channel to bind to this queue
 *
 * Computes the iomapped register addresses for the queue's data buffer,
 * state and (for sproxy) control registers, and names the channel.
 *
 * Return: 0 on success, -ERANGE if the queue id exceeds the device.
 */
static int ti_msgmgr_queue_setup(int idx, struct device *dev,
				 struct device_node *np,
				 struct ti_msgmgr_inst *inst,
				 const struct ti_msgmgr_desc *d,
				 const struct ti_msgmgr_valid_queue_desc *qd,
				 struct ti_queue_inst *qinst,
				 struct mbox_chan *chan)
{
	char *dir;

	qinst->proxy_id = qd->proxy_id;
	qinst->queue_id = qd->queue_id;

	if (qinst->queue_id > d->queue_count) {
		dev_err(dev, "Queue Data [idx=%d] queuid %d > %d\n",
			idx, qinst->queue_id, d->queue_count);
		return -ERANGE;
	}

	if (d->is_sproxy) {
		/* Secure proxy: all registers are laid out per thread */
		qinst->queue_buff_start = inst->queue_proxy_region +
		    SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
					      d->data_first_reg);
		qinst->queue_buff_end = inst->queue_proxy_region +
		    SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
					      d->data_last_reg);
		qinst->queue_state = inst->queue_state_debug_region +
		    SPROXY_THREAD_STATUS_OFFSET(qinst->proxy_id);
		qinst->queue_ctrl = inst->queue_ctrl_region +
		    SPROXY_THREAD_CTRL_OFFSET(qinst->proxy_id);

		/* Direction (is_tx) is read from hardware at startup */
		dir = "thr";
		snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d",
			 dev_name(dev), dir, qinst->proxy_id);
	} else {
		/* Message manager: registers indexed by (proxy, queue) */
		qinst->queue_buff_start = inst->queue_proxy_region +
		    Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
				  d->data_first_reg);
		qinst->queue_buff_end = inst->queue_proxy_region +
		    Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
				  d->data_last_reg);
		qinst->queue_state =
		    inst->queue_state_debug_region +
		    Q_STATE_OFFSET(qinst->queue_id);
		qinst->is_tx = qd->is_tx;
		dir = qinst->is_tx ? "tx" : "rx";
		snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d",
			 dev_name(dev), dir, qinst->queue_id, qinst->proxy_id);
	}

	qinst->chan = chan;

	/* Mark IRQ unresolved; it is looked up on first channel use */
	qinst->irq = -EINVAL;

	chan->con_priv = qinst;

	dev_dbg(dev, "[%d] qidx=%d pidx=%d irq=%d q_s=%p q_e = %p\n",
		idx, qinst->queue_id, qinst->proxy_id, qinst->irq,
		qinst->queue_buff_start, qinst->queue_buff_end);
	return 0;
}
0694
0695 static int ti_msgmgr_queue_rx_set_polled_mode(struct ti_queue_inst *qinst, bool enable)
0696 {
0697 if (enable) {
0698 disable_irq(qinst->irq);
0699 qinst->polled_rx_mode = true;
0700 } else {
0701 enable_irq(qinst->irq);
0702 qinst->polled_rx_mode = false;
0703 }
0704
0705 return 0;
0706 }
0707
0708 static int ti_msgmgr_suspend(struct device *dev)
0709 {
0710 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
0711 struct ti_queue_inst *qinst;
0712 int i;
0713
0714
0715
0716
0717
0718
0719 for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) {
0720 if (!qinst->is_tx)
0721 ti_msgmgr_queue_rx_set_polled_mode(qinst, true);
0722 }
0723
0724 return 0;
0725 }
0726
0727 static int ti_msgmgr_resume(struct device *dev)
0728 {
0729 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
0730 struct ti_queue_inst *qinst;
0731 int i;
0732
0733 for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) {
0734 if (!qinst->is_tx)
0735 ti_msgmgr_queue_rx_set_polled_mode(qinst, false);
0736 }
0737
0738 return 0;
0739 }
0740
/* Suspend/resume switch RX channels between IRQ-driven and polled mode */
static DEFINE_SIMPLE_DEV_PM_OPS(ti_msgmgr_pm_ops, ti_msgmgr_suspend, ti_msgmgr_resume);
0742
0743
/* Mailbox framework callbacks shared by both supported IP variants */
static const struct mbox_chan_ops ti_msgmgr_chan_ops = {
	.startup = ti_msgmgr_queue_startup,
	.shutdown = ti_msgmgr_queue_shutdown,
	.peek_data = ti_msgmgr_queue_peek_data,
	.last_tx_done = ti_msgmgr_last_tx_done,
	.send_data = ti_msgmgr_send_data,
};
0751
0752
/* Keystone K2G: the fixed set of (queue, proxy) pairs Linux may use */
static const struct ti_msgmgr_valid_queue_desc k2g_valid_queues[] = {
	{.queue_id = 0, .proxy_id = 0, .is_tx = true,},
	{.queue_id = 1, .proxy_id = 0, .is_tx = true,},
	{.queue_id = 2, .proxy_id = 0, .is_tx = true,},
	{.queue_id = 3, .proxy_id = 0, .is_tx = true,},
	{.queue_id = 5, .proxy_id = 2, .is_tx = false,},
	{.queue_id = 56, .proxy_id = 1, .is_tx = true,},
	{.queue_id = 57, .proxy_id = 2, .is_tx = false,},
	{.queue_id = 58, .proxy_id = 3, .is_tx = true,},
	{.queue_id = 59, .proxy_id = 4, .is_tx = true,},
	{.queue_id = 60, .proxy_id = 5, .is_tx = true,},
	{.queue_id = 61, .proxy_id = 6, .is_tx = true,},
};
0766
/* Keystone K2G message manager: 64 queues, data in regs 16..31 */
static const struct ti_msgmgr_desc k2g_desc = {
	.queue_count = 64,
	.max_message_size = 64,
	.max_messages = 128,
	.data_region_name = "queue_proxy_region",
	.status_region_name = "queue_state_debug_region",
	.data_first_reg = 16,
	.data_last_reg = 31,
	.status_cnt_mask = Q_STATE_ENTRY_COUNT_MASK,
	.tx_polled = false,
	.valid_queues = k2g_valid_queues,
	.num_valid_queues = ARRAY_SIZE(k2g_valid_queues),
	.is_sproxy = false,
};
0781
0782 static const struct ti_msgmgr_desc am654_desc = {
0783 .queue_count = 190,
0784 .num_valid_queues = 190,
0785 .max_message_size = 60,
0786 .data_region_name = "target_data",
0787 .status_region_name = "rt",
0788 .ctrl_region_name = "scfg",
0789 .data_first_reg = 0,
0790 .data_last_reg = 14,
0791 .status_cnt_mask = SPROXY_THREAD_STATUS_COUNT_MASK,
0792 .tx_polled = false,
0793 .is_sproxy = true,
0794 };
0795
/* Compatible strings select the per-SoC descriptor at probe time */
static const struct of_device_id ti_msgmgr_of_match[] = {
	{.compatible = "ti,k2g-message-manager", .data = &k2g_desc},
	{.compatible = "ti,am654-secure-proxy", .data = &am654_desc},
	{ /* Sentinel */ }
};

MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
0803
/*
 * ti_msgmgr_probe() - Probe a TI message-manager / secure-proxy device
 * @pdev:	Platform device backing this instance
 *
 * Maps the regions named by the matched descriptor, sets up one queue
 * instance and mailbox channel per valid queue, and registers the
 * mailbox controller.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ti_msgmgr_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *of_id;
	struct device_node *np;
	struct resource *res;
	const struct ti_msgmgr_desc *desc;
	struct ti_msgmgr_inst *inst;
	struct ti_queue_inst *qinst;
	struct mbox_controller *mbox;
	struct mbox_chan *chans;
	int queue_count;
	int i;
	int ret = -EINVAL;
	const struct ti_msgmgr_valid_queue_desc *queue_desc;

	if (!dev->of_node) {
		dev_err(dev, "no OF information\n");
		return -EINVAL;
	}
	np = dev->of_node;

	of_id = of_match_device(ti_msgmgr_of_match, dev);
	if (!of_id) {
		dev_err(dev, "OF data missing\n");
		return -EINVAL;
	}
	desc = of_id->data;

	inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	inst->dev = dev;
	inst->desc = desc;

	/* Map the data, status and (sproxy only) control regions */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   desc->data_region_name);
	inst->queue_proxy_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(inst->queue_proxy_region))
		return PTR_ERR(inst->queue_proxy_region);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   desc->status_region_name);
	inst->queue_state_debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(inst->queue_state_debug_region))
		return PTR_ERR(inst->queue_state_debug_region);

	if (desc->is_sproxy) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   desc->ctrl_region_name);
		inst->queue_ctrl_region = devm_ioremap_resource(dev, res);
		if (IS_ERR(inst->queue_ctrl_region))
			return PTR_ERR(inst->queue_ctrl_region);
	}

	dev_dbg(dev, "proxy region=%p, queue_state=%p\n",
		inst->queue_proxy_region, inst->queue_state_debug_region);

	queue_count = desc->num_valid_queues;
	if (!queue_count || queue_count > desc->queue_count) {
		dev_crit(dev, "Invalid Number of queues %d. Max %d\n",
			 queue_count, desc->queue_count);
		return -ERANGE;
	}
	inst->num_valid_queues = queue_count;

	qinst = devm_kcalloc(dev, queue_count, sizeof(*qinst), GFP_KERNEL);
	if (!qinst)
		return -ENOMEM;
	inst->qinsts = qinst;

	chans = devm_kcalloc(dev, queue_count, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return -ENOMEM;
	inst->chans = chans;

	if (desc->is_sproxy) {
		struct ti_msgmgr_valid_queue_desc sproxy_desc;

		/* All sproxy threads are valid: synthesize descriptors */
		for (i = 0; i < queue_count; i++, qinst++, chans++) {
			sproxy_desc.queue_id = 0;
			sproxy_desc.proxy_id = i;
			ret = ti_msgmgr_queue_setup(i, dev, np, inst,
						    desc, &sproxy_desc, qinst,
						    chans);
			if (ret)
				return ret;
		}
	} else {
		/* Only the SoC-defined subset of queues is usable */
		for (i = 0, queue_desc = desc->valid_queues;
		     i < queue_count; i++, qinst++, chans++, queue_desc++) {
			ret = ti_msgmgr_queue_setup(i, dev, np, inst,
						    desc, queue_desc, qinst,
						    chans);
			if (ret)
				return ret;
		}
	}

	mbox = &inst->mbox;
	mbox->dev = dev;
	mbox->ops = &ti_msgmgr_chan_ops;
	mbox->chans = inst->chans;
	mbox->num_chans = inst->num_valid_queues;
	mbox->txdone_irq = false;
	mbox->txdone_poll = desc->tx_polled;
	if (desc->tx_polled)
		mbox->txpoll_period = desc->tx_poll_timeout_ms;
	mbox->of_xlate = ti_msgmgr_of_xlate;

	platform_set_drvdata(pdev, inst);
	ret = devm_mbox_controller_register(dev, mbox);
	if (ret)
		dev_err(dev, "Failed to register mbox_controller(%d)\n", ret);

	return ret;
}
0924
/* Platform driver glue; also matched by name for non-OF instantiation */
static struct platform_driver ti_msgmgr_driver = {
	.probe = ti_msgmgr_probe,
	.driver = {
		.name = "ti-msgmgr",
		.of_match_table = of_match_ptr(ti_msgmgr_of_match),
		.pm = &ti_msgmgr_pm_ops,
	},
};
module_platform_driver(ti_msgmgr_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI message manager driver");
MODULE_AUTHOR("Nishanth Menon");
MODULE_ALIAS("platform:ti-msgmgr");