// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI K3 R5F (MCU) Remote Processor driver
 *
 * Suman Anna <s-anna@ti.com>
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"

/* This address can either be for ATCM or BTCM with the other at address 0x0 */
#define K3_R5_TCM_DEV_ADDR 0x41010000

/* R5 TI-SCI Processor Configuration Flags */
#define PROC_BOOT_CFG_FLAG_R5_DBG_EN 0x00000001
#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN 0x00000002
#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP 0x00000100
#define PROC_BOOT_CFG_FLAG_R5_TEINIT 0x00000200
#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN 0x00000400
#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800
#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000
#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000
/* Available from J7200 SoCs onwards */
#define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS 0x00004000
/* Applicable to only AM64x SoCs */
#define PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE 0x00008000

/* R5 TI-SCI Processor Control Flags */
#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001

/* R5 TI-SCI Processor Status Flags */
#define PROC_BOOT_STATUS_FLAG_R5_WFE 0x00000001
#define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002
#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004
#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100
/* Applicable to only AM64x SoCs */
#define PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY 0x00000200
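
/**
 * struct k3_r5_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address of the memory region from remote processor view
 * @size: Size of the memory region
 */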
struct k3_r5_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};
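
/*
 * All cluster mode values are not applicable on all SoCs. The following
 * are the modes supported on the SoCs handled by this driver:
 *
 *   Split mode      : AM65x, J721E, J7200, J721S2 and AM64x SoCs
 *   LockStep mode   : AM65x, J721E, J7200 and J721S2 SoCs
 *   Single-CPU mode : AM64x SoCs only
 */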
enum cluster_mode {
	CLUSTER_MODE_SPLIT = 0,
	CLUSTER_MODE_LOCKSTEP,
	CLUSTER_MODE_SINGLECPU,
};
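
/**
 * struct k3_r5_soc_data - match data to handle SoC variations
 * @tcm_is_double: flag to denote the larger unified TCMs in certain modes
 * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
 * @single_cpu_mode: flag to denote if SoC/IP supports Single-CPU mode
 */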
struct k3_r5_soc_data {
	bool tcm_is_double;
	bool tcm_ecc_autoinit;
	bool single_cpu_mode;
};
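
/**
 * struct k3_r5_cluster - K3 R5F Cluster structure
 * @dev: cached device pointer
 * @mode: Mode to configure the Cluster - Split, LockStep or Single-CPU
 * @cores: list of R5 cores within the cluster
 * @soc_data: SoC-specific feature data for a R5FSS
 */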
struct k3_r5_cluster {
	struct device *dev;
	enum cluster_mode mode;
	struct list_head cores;
	const struct k3_r5_soc_data *soc_data;
};
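
/**
 * struct k3_r5_core - K3 R5 core structure
 * @elem: linked list item
 * @dev: cached device pointer
 * @rproc: rproc handle representing this core
 * @mem: internal memory regions data
 * @sram: on-chip SRAM memory regions data
 * @num_mems: number of internal memory regions
 * @num_sram: number of on-chip SRAM memory regions
 * @reset: reset control handle
 * @tsp: TI-SCI processor control handle
 * @ti_sci: TI-SCI handle
 * @ti_sci_id: TI-SCI device identifier
 * @atcm_enable: flag to control ATCM enablement
 * @btcm_enable: flag to control BTCM enablement
 * @loczrama: flag to dictate which TCM is at device address 0x0
 */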
struct k3_r5_core {
	struct list_head elem;
	struct device *dev;
	struct rproc *rproc;
	struct k3_r5_mem *mem;
	struct k3_r5_mem *sram;
	int num_mems;
	int num_sram;
	struct reset_control *reset;
	struct ti_sci_proc *tsp;
	const struct ti_sci_handle *ti_sci;
	u32 ti_sci_id;
	u32 atcm_enable;
	u32 btcm_enable;
	u32 loczrama;
};
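
/**
 * struct k3_r5_rproc - K3 remote processor state
 * @dev: cached device pointer
 * @cluster: cached pointer to parent cluster structure
 * @mbox: mailbox channel handle
 * @client: mailbox client to request the mailbox channel
 * @rproc: rproc handle
 * @core: cached pointer to r5 core structure being used
 * @rmem: reserved memory regions data
 * @num_rmems: number of reserved memory regions
 */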
struct k3_r5_rproc {
	struct device *dev;
	struct k3_r5_cluster *cluster;
	struct mbox_chan *mbox;
	struct mbox_client client;
	struct rproc *rproc;
	struct k3_r5_core *core;
	struct k3_r5_mem *rmem;
	int num_rmems;
};
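
/**
 * k3_r5_rproc_mbox_callback() - inbound mailbox message handler
 * @client: mailbox client pointer used for requesting the mailbox channel
 * @data: mailbox payload
 *
 * This handler is invoked by the OMAP mailbox driver whenever a mailbox
 * message is received. Usually, the mailbox payload simply contains
 * the index of the virtqueue that is kicked by the remote processor,
 * and we let remoteproc core handle it.
 *
 * In addition to virtqueue indices, we also have some out-of-band values
 * that indicate different events. Those values are deliberately very
 * large so they don't coincide with virtqueue indices.
 */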
static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
{
	struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc,
						 client);
	struct device *dev = kproc->rproc->dev.parent;
	const char *name = kproc->rproc->name;
	u32 msg = omap_mbox_message(data);

	dev_dbg(dev, "mbox msg: 0x%x\n", msg);

	switch (msg) {
	case RP_MBOX_CRASH:
		/*
		 * remoteproc detected an exception, but error recovery is not
		 * supported. So, just log this for now
		 */
		dev_err(dev, "K3 R5F rproc %s crashed\n", name);
		break;
	case RP_MBOX_ECHO_REPLY:
		dev_info(dev, "received echo reply from %s\n", name);
		break;
	default:
		/* silently handle all other valid messages */
		if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
			return;
		if (msg > kproc->rproc->max_notifyid) {
			dev_dbg(dev, "dropping unknown message 0x%x", msg);
			return;
		}
		/* msg contains the index of the triggered vring */
		if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
			dev_dbg(dev, "no message was found in vqid %d\n", msg);
	}
}

static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct device *dev = rproc->dev.parent;
	mbox_msg_t msg = (mbox_msg_t)vqid;
	int ret;

	/* send the index of the triggered virtqueue in the mailbox payload */
	ret = mbox_send_message(kproc->mbox, (void *)msg);
	if (ret < 0)
		dev_err(dev, "failed to send mailbox message, status = %d\n",
			ret);
}

static int k3_r5_split_reset(struct k3_r5_core *core)
{
	int ret;

	ret = reset_control_assert(core->reset);
	if (ret) {
		dev_err(core->dev, "local-reset assert failed, ret = %d\n",
			ret);
		return ret;
	}

	ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
						   core->ti_sci_id);
	if (ret) {
		dev_err(core->dev, "module-reset assert failed, ret = %d\n",
			ret);
		if (reset_control_deassert(core->reset))
			dev_warn(core->dev, "local-reset deassert back failed\n");
	}

	return ret;
}

static int k3_r5_split_release(struct k3_r5_core *core)
{
	int ret;

	ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
						   core->ti_sci_id);
	if (ret) {
		dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
			ret);
		return ret;
	}

	ret = reset_control_deassert(core->reset);
	if (ret) {
		dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
			ret);
		if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
							 core->ti_sci_id))
			dev_warn(core->dev, "module-reset assert back failed\n");
	}

	return ret;
}

static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
{
	struct k3_r5_core *core;
	int ret;

	/* assert local reset on all applicable cores */
	list_for_each_entry(core, &cluster->cores, elem) {
		ret = reset_control_assert(core->reset);
		if (ret) {
			dev_err(core->dev, "local-reset assert failed, ret = %d\n",
				ret);
			core = list_prev_entry(core, elem);
			goto unroll_local_reset;
		}
	}

	/* disable PSC modules on all applicable cores */
	list_for_each_entry(core, &cluster->cores, elem) {
		ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
							   core->ti_sci_id);
		if (ret) {
			dev_err(core->dev, "module-reset assert failed, ret = %d\n",
				ret);
			goto unroll_module_reset;
		}
	}

	return 0;

unroll_module_reset:
	list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {
		if (core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
							 core->ti_sci_id))
			dev_warn(core->dev, "module-reset deassert back failed\n");
	}
	core = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_local_reset:
	list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
		if (reset_control_deassert(core->reset))
			dev_warn(core->dev, "local-reset deassert back failed\n");
	}

	return ret;
}

static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
{
	struct k3_r5_core *core;
	int ret;

	/* enable PSC modules on all applicable cores */
	list_for_each_entry_reverse(core, &cluster->cores, elem) {
		ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
							   core->ti_sci_id);
		if (ret) {
			dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
				ret);
			core = list_next_entry(core, elem);
			goto unroll_module_reset;
		}
	}

	/* deassert local reset on all applicable cores */
	list_for_each_entry_reverse(core, &cluster->cores, elem) {
		ret = reset_control_deassert(core->reset);
		if (ret) {
			dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
				ret);
			goto unroll_local_reset;
		}
	}

	return 0;

unroll_local_reset:
	list_for_each_entry_continue(core, &cluster->cores, elem) {
		if (reset_control_assert(core->reset))
			dev_warn(core->dev, "local-reset assert back failed\n");
	}
	core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_module_reset:
	list_for_each_entry_from(core, &cluster->cores, elem) {
		if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
							 core->ti_sci_id))
			dev_warn(core->dev, "module-reset assert back failed\n");
	}

	return ret;
}

static inline int k3_r5_core_halt(struct k3_r5_core *core)
{
	return ti_sci_proc_set_control(core->tsp,
				       PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
}

static inline int k3_r5_core_run(struct k3_r5_core *core)
{
	return ti_sci_proc_set_control(core->tsp,
				       0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
}

static int k3_r5_rproc_request_mbox(struct rproc *rproc)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct mbox_client *client = &kproc->client;
	struct device *dev = kproc->dev;
	int ret;

	client->dev = dev;
	client->tx_done = NULL;
	client->rx_callback = k3_r5_rproc_mbox_callback;
	client->tx_block = false;
	client->knows_txdone = false;

	kproc->mbox = mbox_request_channel(client, 0);
	if (IS_ERR(kproc->mbox)) {
		ret = -EBUSY;
		dev_err(dev, "mbox_request_channel failed: %ld\n",
			PTR_ERR(kproc->mbox));
		return ret;
	}

	/*
	 * Ping the remote processor, this is only for sanity-sake for now;
	 * there is no functional effect whatsoever.
	 *
	 * Note that the reply will _not_ arrive immediately: this message
	 * will wait in the mailbox fifo until the remote processor is booted.
	 */
	ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
	if (ret < 0) {
		dev_err(dev, "mbox_send_message failed: %d\n", ret);
		mbox_free_channel(kproc->mbox);
		return ret;
	}

	return 0;
}
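
/*
 * The R5F cores have controls for both a reset and a halt/run. The code
 * execution from TCMs doesn't require the core to be released from reset.
 * This function performs the pre-load configuration: it releases the
 * applicable core(s) from reset so that the firmware can be loaded into
 * the TCMs, and zeroes out the TCMs (to make ECC effective) on SoCs that
 * do not auto-initialize them in hardware.
 */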
static int k3_r5_rproc_prepare(struct rproc *rproc)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct k3_r5_cluster *cluster = kproc->cluster;
	struct k3_r5_core *core = kproc->core;
	struct device *dev = kproc->dev;
	u32 ctrl = 0, cfg = 0, stat = 0;
	u64 boot_vec = 0;
	bool mem_init_dis;
	int ret;

	ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat);
	if (ret < 0)
		return ret;
	mem_init_dis = !!(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);

	/* Re-use LockStep-mode reset logic for Single-CPU mode */
	ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
	       cluster->mode == CLUSTER_MODE_SINGLECPU) ?
		k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
	if (ret) {
		dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
			ret);
		return ret;
	}

	/*
	 * Newer IP revisions like on J7200 SoCs support h/w auto-initialization
	 * of the TCMs, so there is no need to perform the s/w memzero. This bit
	 * is configurable through System Firmware, the default value does
	 * perform auto-init, but account for it in case it is disabled
	 */
	if (cluster->soc_data->tcm_ecc_autoinit && !mem_init_dis) {
		dev_dbg(dev, "leveraging h/w init for TCM memories\n");
		return 0;
	}

	/*
	 * Zero out both TCMs unconditionally (access from the v8 Arm core is
	 * not affected by the ATCM & BTCM enable configuration values) so that
	 * ECC can be effective on all TCM addresses.
	 */
	dev_dbg(dev, "zeroing out ATCM memory\n");
	memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);

	dev_dbg(dev, "zeroing out BTCM memory\n");
	memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);

	return 0;
}
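
/*
 * This function implements the .unprepare() ops and performs the complimentary
 * operations to that of the .prepare() ops: the applicable core(s) are put
 * back into reset (both the local reset and the module reset through the PSC).
 * The cores themselves are only halted in the .stop() ops.
 */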
static int k3_r5_rproc_unprepare(struct rproc *rproc)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct k3_r5_cluster *cluster = kproc->cluster;
	struct k3_r5_core *core = kproc->core;
	struct device *dev = kproc->dev;
	int ret;

	/* Re-use LockStep-mode reset logic for Single-CPU mode */
	ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
	       cluster->mode == CLUSTER_MODE_SINGLECPU) ?
		k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
	if (ret)
		dev_err(dev, "unable to disable cores, ret = %d\n", ret);

	return ret;
}
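
/*
 * The R5F start sequence includes two different operations
 * 1. Configure the boot vector for the R5F core(s)
 * 2. Unhalt/Run the R5F core(s)
 *
 * The sequence is different between LockStep and Split modes. The LockStep
 * mode requires the boot vector to be configured only for Core0, and then
 * both cores need to be unhalted starting with Core1. The Split-mode requires
 * that Core0 always be maintained in a higher power state than Core1, so
 * Core1 can only be started after Core0 is already running.
 */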
static int k3_r5_rproc_start(struct rproc *rproc)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct k3_r5_cluster *cluster = kproc->cluster;
	struct device *dev = kproc->dev;
	struct k3_r5_core *core;
	u32 boot_addr;
	int ret;

	ret = k3_r5_rproc_request_mbox(rproc);
	if (ret)
		return ret;

	boot_addr = rproc->bootaddr;
	dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);

	/* boot vector need not be programmed for Core1 in LockStep mode */
	core = kproc->core;
	ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
	if (ret)
		goto put_mbox;

	/* unhalt/run all applicable cores */
	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		list_for_each_entry_reverse(core, &cluster->cores, elem) {
			ret = k3_r5_core_run(core);
			if (ret)
				goto unroll_core_run;
		}
	} else {
		ret = k3_r5_core_run(core);
		if (ret)
			goto put_mbox;
	}

	return 0;

unroll_core_run:
	list_for_each_entry_continue(core, &cluster->cores, elem) {
		if (k3_r5_core_halt(core))
			dev_warn(core->dev, "core halt back failed\n");
	}
put_mbox:
	mbox_free_channel(kproc->mbox);
	return ret;
}
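
/*
 * The R5F stop sequence performs the operations of the start sequence in
 * reverse: the core(s) only need to be halted. In LockStep mode, Core0 is
 * halted first followed by Core1; in Split mode, only the core associated
 * with the rproc is halted (Core1 is expected to be stopped before Core0).
 */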
static int k3_r5_rproc_stop(struct rproc *rproc)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct k3_r5_cluster *cluster = kproc->cluster;
	struct k3_r5_core *core = kproc->core;
	int ret;

	/* halt all applicable cores */
	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		list_for_each_entry(core, &cluster->cores, elem) {
			ret = k3_r5_core_halt(core);
			if (ret) {
				core = list_prev_entry(core, elem);
				goto unroll_core_halt;
			}
		}
	} else {
		ret = k3_r5_core_halt(core);
		if (ret)
			goto out;
	}

	mbox_free_channel(kproc->mbox);

	return 0;

unroll_core_halt:
	list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
		if (k3_r5_core_run(core))
			dev_warn(core->dev, "core run back failed\n");
	}
out:
	return ret;
}
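
/*
 * Attach to a running R5F remote processor (IPC-only mode)
 *
 * The R5F attach callback only needs to request the mailbox; the remote
 * processor is already booted, so there is no need to issue any TI-SCI
 * commands to boot the R5F cores. This callback is invoked only in
 * IPC-only mode.
 */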
static int k3_r5_rproc_attach(struct rproc *rproc)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	ret = k3_r5_rproc_request_mbox(rproc);
	if (ret)
		return ret;

	dev_info(dev, "R5F core initialized in IPC-only mode\n");
	return 0;
}
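
/*
 * Detach from a running R5F remote processor (IPC-only mode)
 *
 * The R5F detach callback performs the opposite operation to the attach
 * callback and only needs to release the mailbox; the R5F cores are not
 * stopped and will be left in booted state. This callback is invoked only
 * in IPC-only mode.
 */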
static int k3_r5_rproc_detach(struct rproc *rproc)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;

	mbox_free_channel(kproc->mbox);
	dev_info(dev, "R5F core deinitialized in IPC-only mode\n");
	return 0;
}
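
/*
 * This function implements the .get_loaded_rsc_table() callback and is used
 * to provide the resource table for the booted R5F in IPC-only mode. The K3
 * R5F firmwares follow a design-by-contract approach and are expected to
 * have the resource table at the base of the DDR region reserved for firmware
 * usage. This provides flexibility for the remote processor to be booted by
 * different bootloaders that may or may not have the ability to publish the
 * resource table address and size through a DT property. This callback is
 * invoked only in IPC-only mode.
 */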
static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc,
							 size_t *rsc_table_sz)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;

	if (!kproc->rmem[0].cpu_addr) {
		dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * NOTE: The resource table size is currently hard-coded to a maximum
	 * of 256 bytes. The most common resource table usage for K3 firmwares
	 * is to only have the vdev resource entry and an optional trace entry.
	 * The exact size could be computed based on the resource table address,
	 * but the hard-coded value suffices to support the IPC-only mode.
	 */
	*rsc_table_sz = 256;
	return (struct resource_table *)kproc->rmem[0].cpu_addr;
}
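
/*
 * Custom function implementing the rproc .da_to_va ops, used for translating
 * a device address (internal RAMs or reserved memory regions) to a kernel
 * virtual address. The translated addresses can be used either by the
 * remoteproc core for loading, or by any rpmsg bus drivers.
 */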
static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct k3_r5_rproc *kproc = rproc->priv;
	struct k3_r5_core *core = kproc->core;
	void __iomem *va = NULL;
	phys_addr_t bus_addr;
	u32 dev_addr, offset;
	size_t size;
	int i;

	if (len == 0)
		return NULL;

	/* handle both R5 and SoC views of ATCM and BTCM */
	for (i = 0; i < core->num_mems; i++) {
		bus_addr = core->mem[i].bus_addr;
		dev_addr = core->mem[i].dev_addr;
		size = core->mem[i].size;

		/* handle R5-view addresses of TCMs */
		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = core->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}

		/* handle SoC-view addresses of TCMs */
		if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
			offset = da - bus_addr;
			va = core->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	/* handle any SRAM regions using SoC-view addresses */
	for (i = 0; i < core->num_sram; i++) {
		dev_addr = core->sram[i].dev_addr;
		size = core->sram[i].size;

		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = core->sram[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	/* handle static DDR reserved memory regions */
	for (i = 0; i < kproc->num_rmems; i++) {
		dev_addr = kproc->rmem[i].dev_addr;
		size = kproc->rmem[i].size;

		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = kproc->rmem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	return NULL;
}

static const struct rproc_ops k3_r5_rproc_ops = {
	.prepare = k3_r5_rproc_prepare,
	.unprepare = k3_r5_rproc_unprepare,
	.start = k3_r5_rproc_start,
	.stop = k3_r5_rproc_stop,
	.kick = k3_r5_rproc_kick,
	.da_to_va = k3_r5_rproc_da_to_va,
};
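
/*
 * Internal R5F Core configuration
 *
 * Each R5FSS has a cluster-level setting for configuring the processor
 * subsystem either in a safety/fault-tolerant LockStep mode or a performance
 * oriented Split mode on most SoCs. A fewer SoCs support a non-safety mode
 * as an alternate for LockStep mode that exercises only a single R5F core
 * called Single-CPU mode. Each R5F core has a number of settings to either
 * enable/disable each of the TCMs, and to control which TCM appears at the
 * R5F core's address 0x0. These settings need to be configured before the
 * resets for the corresponding core are released, and they are all managed
 * by the System Firmware through various TI-SCI calls.
 */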
static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
{
	struct k3_r5_cluster *cluster = kproc->cluster;
	struct device *dev = kproc->dev;
	struct k3_r5_core *core0, *core, *temp;
	u32 ctrl = 0, cfg = 0, stat = 0;
	u32 set_cfg = 0, clr_cfg = 0;
	u64 boot_vec = 0;
	bool lockstep_en;
	bool single_cpu;
	int ret;

	core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
	if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
	    cluster->mode == CLUSTER_MODE_SINGLECPU) {
		core = core0;
	} else {
		core = kproc->core;
	}

	ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
				     &stat);
	if (ret < 0)
		return ret;

	dev_dbg(dev, "boot_vector = 0x%llx, cfg = 0x%x ctrl = 0x%x stat = 0x%x\n",
		boot_vec, cfg, ctrl, stat);

	/* check if only Single-CPU mode is supported on applicable SoCs */
	if (cluster->soc_data->single_cpu_mode) {
		single_cpu =
			!!(stat & PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY);
		if (single_cpu && cluster->mode == CLUSTER_MODE_SPLIT) {
			dev_err(cluster->dev, "split-mode not permitted, force configuring for single-cpu mode\n");
			cluster->mode = CLUSTER_MODE_SINGLECPU;
		}
		goto config;
	}

	/* check conventional LockStep vs Split mode configuration */
	lockstep_en = !!(stat & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
	if (!lockstep_en && cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		dev_err(cluster->dev, "lockstep mode not permitted, force configuring for split-mode\n");
		cluster->mode = CLUSTER_MODE_SPLIT;
	}

config:
	/* always enable ARM mode and set boot vector to 0x0 */
	boot_vec = 0x0;
	if (core == core0) {
		clr_cfg = PROC_BOOT_CFG_FLAG_R5_TEINIT;
		if (cluster->soc_data->single_cpu_mode) {
			/*
			 * Single-CPU configuration bit can only be configured
			 * on Core0 and system firmware will NACK any requests
			 * with the legacy R5F cluster modes on applicable SoCs
			 */
			if (cluster->mode == CLUSTER_MODE_SINGLECPU)
				set_cfg = PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE;
		} else {
			/*
			 * LockStep configuration bit is Read-only on Split-mode
			 * _only_ devices and system firmware will NACK any
			 * requests with the LockStep bit set, so program it
			 * only on permitted devices
			 */
			if (lockstep_en)
				clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
		}
	}

	if (core->atcm_enable)
		set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
	else
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;

	if (core->btcm_enable)
		set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
	else
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;

	if (core->loczrama)
		set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
	else
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		/*
		 * work around system firmware limitations to make sure both
		 * cores are programmed symmetrically in LockStep. LockStep
		 * and TEINIT config is only allowed with Core0.
		 */
		list_for_each_entry(temp, &cluster->cores, elem) {
			ret = k3_r5_core_halt(temp);
			if (ret)
				goto out;

			if (temp != core) {
				clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
				clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT;
			}
			ret = ti_sci_proc_set_config(temp->tsp, boot_vec,
						     set_cfg, clr_cfg);
			if (ret)
				goto out;
		}

		set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
		clr_cfg = 0;
		ret = ti_sci_proc_set_config(core->tsp, boot_vec,
					     set_cfg, clr_cfg);
	} else {
		ret = k3_r5_core_halt(core);
		if (ret)
			goto out;

		ret = ti_sci_proc_set_config(core->tsp, boot_vec,
					     set_cfg, clr_cfg);
	}

out:
	return ret;
}

static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
{
	struct device *dev = kproc->dev;
	struct device_node *np = dev_of_node(dev);
	struct device_node *rmem_np;
	struct reserved_mem *rmem;
	int num_rmems;
	int ret, i;

	num_rmems = of_property_count_elems_of_size(np, "memory-region",
						    sizeof(phandle));
	if (num_rmems <= 0) {
		dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
			num_rmems);
		return -EINVAL;
	}
	if (num_rmems < 2) {
		dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
			num_rmems);
		return -EINVAL;
	}

	/* use reserved memory region 0 for vring DMA allocations */
	ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
	if (ret) {
		dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
			ret);
		return ret;
	}

	num_rmems--;
	kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
	if (!kproc->rmem) {
		ret = -ENOMEM;
		goto release_rmem;
	}

	/* map the remaining reserved memory regions for static carveouts */
	for (i = 0; i < num_rmems; i++) {
		rmem_np = of_parse_phandle(np, "memory-region", i + 1);
		if (!rmem_np) {
			ret = -EINVAL;
			goto unmap_rmem;
		}

		rmem = of_reserved_mem_lookup(rmem_np);
		if (!rmem) {
			of_node_put(rmem_np);
			ret = -EINVAL;
			goto unmap_rmem;
		}
		of_node_put(rmem_np);

		kproc->rmem[i].bus_addr = rmem->base;
		/*
		 * R5Fs do not have an MMU, but have a Region Address Translator
		 * (RAT) module that provides a fixed entry translation between
		 * the 32-bit processor addresses to 64-bit bus addresses. The
		 * RAT is programmable only by the R5F cores, and support for
		 * RAT is currently not supported, so 64-bit address regions are
		 * not supported. The absence of MMUs implies that the R5F
		 * device addresses/supported memory regions are restricted to
		 * 32-bit bus addresses, and are identical
		 */
		kproc->rmem[i].dev_addr = (u32)rmem->base;
		kproc->rmem[i].size = rmem->size;
		kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
		if (!kproc->rmem[i].cpu_addr) {
			dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
				i + 1, &rmem->base, &rmem->size);
			ret = -ENOMEM;
			goto unmap_rmem;
		}

		dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			i + 1, &kproc->rmem[i].bus_addr,
			kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
			kproc->rmem[i].dev_addr);
	}
	kproc->num_rmems = num_rmems;

	return 0;

unmap_rmem:
	for (i--; i >= 0; i--)
		iounmap(kproc->rmem[i].cpu_addr);
	kfree(kproc->rmem);
release_rmem:
	of_reserved_mem_device_release(dev);
	return ret;
}

static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
{
	int i;

	for (i = 0; i < kproc->num_rmems; i++)
		iounmap(kproc->rmem[i].cpu_addr);
	kfree(kproc->rmem);

	of_reserved_mem_device_release(kproc->dev);
}
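
/*
 * On SoCs with the 'tcm_is_double' quirk, the TCMs of Core1 are combined
 * with those of Core0 in LockStep and Single-CPU modes, doubling the Core0
 * TCM banks to 64 KB each. The device tree describes these larger combined
 * sizes, so the Core0 ATCM and BTCM sizes need to be halved when such a
 * cluster is actually running in Split mode.
 */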
static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
{
	struct k3_r5_cluster *cluster = kproc->cluster;
	struct k3_r5_core *core = kproc->core;
	struct device *cdev = core->dev;
	struct k3_r5_core *core0;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
	    cluster->mode == CLUSTER_MODE_SINGLECPU ||
	    !cluster->soc_data->tcm_is_double)
		return;

	core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
	if (core == core0) {
		WARN_ON(core->mem[0].size != SZ_64K);
		WARN_ON(core->mem[1].size != SZ_64K);

		core->mem[0].size /= 2;
		core->mem[1].size /= 2;

		dev_dbg(cdev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
			core->mem[0].size, core->mem[1].size);
	}
}
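
/*
 * This function checks and configures a R5F core for IPC-only mode or for
 * remoteproc mode. The driver is configured to be in IPC-only mode for a
 * R5F core when the core has been loaded and started by an external entity
 * such as a bootloader; this is detected by querying the System Firmware for
 * the reset, power-on and halt status of the core.
 *
 * In IPC-only mode, the rproc state is set to RPROC_DETACHED, a limited set
 * of rproc ops is installed, and the TCM, cluster and core flags are fixed
 * up to reflect the values already configured by the external entity.
 *
 * Returns 0 for remoteproc mode, a positive value for IPC-only mode, or a
 * negative value on error.
 */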
static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
{
	struct k3_r5_cluster *cluster = kproc->cluster;
	struct k3_r5_core *core = kproc->core;
	struct device *cdev = core->dev;
	bool r_state = false, c_state = false;
	u32 ctrl = 0, cfg = 0, stat = 0, halted = 0;
	u64 boot_vec = 0;
	u32 atcm_enable, btcm_enable, loczrama;
	struct k3_r5_core *core0;
	enum cluster_mode mode;
	int reset_ctrl_status;
	int ret;

	core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);

	ret = core->ti_sci->ops.dev_ops.is_on(core->ti_sci, core->ti_sci_id,
					      &r_state, &c_state);
	if (ret) {
		dev_err(cdev, "failed to get initial state, mode cannot be determined, ret = %d\n",
			ret);
		return ret;
	}
	if (r_state != c_state) {
		dev_warn(cdev, "R5F core may have been powered on by a different host, programmed state (%d) != actual state (%d)\n",
			 r_state, c_state);
	}

	/*
	 * Keep the local reset status in its own variable, the TI-SCI status
	 * query below would otherwise clobber it before it is consumed in the
	 * IPC-only mode check further down.
	 */
	reset_ctrl_status = reset_control_status(core->reset);
	if (reset_ctrl_status < 0) {
		dev_err(cdev, "failed to get initial local reset status, ret = %d\n",
			reset_ctrl_status);
		return reset_ctrl_status;
	}

	ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
				     &stat);
	if (ret < 0) {
		dev_err(cdev, "failed to get initial processor status, ret = %d\n",
			ret);
		return ret;
	}
	atcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_ATCM_EN ? 1 : 0;
	btcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_BTCM_EN ? 1 : 0;
	loczrama = cfg & PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE ? 1 : 0;
	if (cluster->soc_data->single_cpu_mode) {
		mode = cfg & PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE ?
				CLUSTER_MODE_SINGLECPU : CLUSTER_MODE_SPLIT;
	} else {
		mode = cfg & PROC_BOOT_CFG_FLAG_R5_LOCKSTEP ?
				CLUSTER_MODE_LOCKSTEP : CLUSTER_MODE_SPLIT;
	}
	halted = ctrl & PROC_BOOT_CTRL_FLAG_R5_CORE_HALT;

	/*
	 * IPC-only mode detection requires both local and module resets to
	 * be deasserted and the R5F core to be unhalted. Local reset status is
	 * irrelevant if module reset is asserted (POR value has local reset
	 * deasserted), and is deemed as remoteproc mode
	 */
	if (c_state && !reset_ctrl_status && !halted) {
		dev_info(cdev, "configured R5F for IPC-only mode\n");
		kproc->rproc->state = RPROC_DETACHED;
		ret = 1;
		/* override rproc ops with only required IPC-only mode ops */
		kproc->rproc->ops->prepare = NULL;
		kproc->rproc->ops->unprepare = NULL;
		kproc->rproc->ops->start = NULL;
		kproc->rproc->ops->stop = NULL;
		kproc->rproc->ops->attach = k3_r5_rproc_attach;
		kproc->rproc->ops->detach = k3_r5_rproc_detach;
		kproc->rproc->ops->get_loaded_rsc_table =
						k3_r5_get_loaded_rsc_table;
	} else if (!c_state) {
		dev_info(cdev, "configured R5F for remoteproc mode\n");
		ret = 0;
	} else {
		dev_err(cdev, "mismatched mode: local_reset = %s, module_reset = %s, core_state = %s\n",
			!reset_ctrl_status ? "deasserted" : "asserted",
			c_state ? "deasserted" : "asserted",
			halted ? "halted" : "unhalted");
		ret = -EINVAL;
	}

	/* fixup TCMs, cluster & core flags to actual values in IPC-only mode */
	if (ret > 0) {
		if (core == core0)
			cluster->mode = mode;
		core->atcm_enable = atcm_enable;
		core->btcm_enable = btcm_enable;
		core->loczrama = loczrama;
		core->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR;
		core->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0;
	}

	return ret;
}

static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
{
	struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	struct k3_r5_rproc *kproc;
	struct k3_r5_core *core, *core1;
	struct device *cdev;
	const char *fw_name;
	struct rproc *rproc;
	int ret, ret1;

	core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
	list_for_each_entry(core, &cluster->cores, elem) {
		cdev = core->dev;
		ret = rproc_of_parse_firmware(cdev, 0, &fw_name);
		if (ret) {
			dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
				ret);
			goto out;
		}

		rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
				    fw_name, sizeof(*kproc));
		if (!rproc) {
			ret = -ENOMEM;
			goto out;
		}

		/* K3 R5s have a Region Address Translator (RAT) but no MMU */
		rproc->has_iommu = false;
		/* error recovery is not supported at present */
		rproc->recovery_disabled = true;

		kproc = rproc->priv;
		kproc->cluster = cluster;
		kproc->core = core;
		kproc->dev = cdev;
		kproc->rproc = rproc;
		core->rproc = rproc;

		ret = k3_r5_rproc_configure_mode(kproc);
		if (ret < 0)
			goto err_config;
		if (ret)
			goto init_rmem;

		ret = k3_r5_rproc_configure(kproc);
		if (ret) {
			dev_err(dev, "initial configure failed, ret = %d\n",
				ret);
			goto err_config;
		}

init_rmem:
		k3_r5_adjust_tcm_sizes(kproc);

		ret = k3_r5_reserved_mem_init(kproc);
		if (ret) {
			dev_err(dev, "reserved memory init failed, ret = %d\n",
				ret);
			goto err_config;
		}

		ret = rproc_add(rproc);
		if (ret) {
			dev_err(dev, "rproc_add failed, ret = %d\n", ret);
			goto err_add;
		}

		/* create only one rproc in lockstep mode or single-cpu mode */
		if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
		    cluster->mode == CLUSTER_MODE_SINGLECPU)
			break;
	}

	return 0;

err_split:
	if (rproc->state == RPROC_ATTACHED) {
		ret1 = rproc_detach(rproc);
		if (ret1) {
			dev_err(kproc->dev, "failed to detach rproc, ret = %d\n",
				ret1);
			return ret1;
		}
	}

	rproc_del(rproc);
err_add:
	k3_r5_reserved_mem_exit(kproc);
err_config:
	rproc_free(rproc);
	core->rproc = NULL;
out:
	/* undo core0 upon any failures on core1 in split-mode */
	if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
		core = list_prev_entry(core, elem);
		rproc = core->rproc;
		kproc = rproc->priv;
		goto err_split;
	}
	return ret;
}

static void k3_r5_cluster_rproc_exit(void *data)
{
	struct k3_r5_cluster *cluster = platform_get_drvdata(data);
	struct k3_r5_rproc *kproc;
	struct k3_r5_core *core;
	struct rproc *rproc;
	int ret;

	/*
	 * lockstep mode and single-cpu modes have only one rproc associated
	 * with first core, whereas split-mode has two rprocs associated with
	 * each core, and requires that core1 be powered down first
	 */
	core = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
		cluster->mode == CLUSTER_MODE_SINGLECPU) ?
		list_first_entry(&cluster->cores, struct k3_r5_core, elem) :
		list_last_entry(&cluster->cores, struct k3_r5_core, elem);

	list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
		rproc = core->rproc;
		kproc = rproc->priv;

		if (rproc->state == RPROC_ATTACHED) {
			ret = rproc_detach(rproc);
			if (ret) {
				dev_err(kproc->dev, "failed to detach rproc, ret = %d\n", ret);
				return;
			}
		}

		rproc_del(rproc);

		k3_r5_reserved_mem_exit(kproc);

		rproc_free(rproc);
		core->rproc = NULL;
	}
}

static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
					       struct k3_r5_core *core)
{
	static const char * const mem_names[] = {"atcm", "btcm"};
	struct device *dev = &pdev->dev;
	struct resource *res;
	int num_mems;
	int i;

	num_mems = ARRAY_SIZE(mem_names);
	core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
	if (!core->mem)
		return -ENOMEM;

	for (i = 0; i < num_mems; i++) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   mem_names[i]);
		if (!res) {
			dev_err(dev, "found no memory resource for %s\n",
				mem_names[i]);
			return -EINVAL;
		}
		if (!devm_request_mem_region(dev, res->start,
					     resource_size(res),
					     dev_name(dev))) {
			dev_err(dev, "could not request %s region for resource\n",
				mem_names[i]);
			return -EBUSY;
		}

		/*
		 * TCMs are designed in general to support RAM-like backing
		 * memories. So, map these as Normal Non-Cached memories. This
		 * also avoids/fixes any potential alignment faults due to
		 * unaligned data accesses when using memcpy() or memset()
		 * functions (normally seen with device type memory).
		 */
		core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
							resource_size(res));
		if (!core->mem[i].cpu_addr) {
			dev_err(dev, "failed to map %s memory\n", mem_names[i]);
			return -ENOMEM;
		}
		core->mem[i].bus_addr = res->start;

		/*
		 * The R5F cores can place the ATCM & BTCM anywhere in their
		 * address map based on the corresponding Region Registers in
		 * the System Control coprocessor. For now, place the ATCM and
		 * BTCM at addresses 0 and 0x41010000 (same as the bus address
		 * on AM65x SoCs) based on the loczrama setting
		 */
		if (!strcmp(mem_names[i], "atcm")) {
			core->mem[i].dev_addr = core->loczrama ?
							0 : K3_R5_TCM_DEV_ADDR;
		} else {
			core->mem[i].dev_addr = core->loczrama ?
							K3_R5_TCM_DEV_ADDR : 0;
		}
		core->mem[i].size = resource_size(res);

		dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			mem_names[i], &core->mem[i].bus_addr,
			core->mem[i].size, core->mem[i].cpu_addr,
			core->mem[i].dev_addr);
	}
	core->num_mems = num_mems;

	return 0;
}

static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
					   struct k3_r5_core *core)
{
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct device_node *sram_np;
	struct resource res;
	int num_sram;
	int i, ret;

	num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
	if (num_sram <= 0) {
		dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
			num_sram);
		return 0;
	}

	core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
	if (!core->sram)
		return -ENOMEM;

	for (i = 0; i < num_sram; i++) {
		sram_np = of_parse_phandle(np, "sram", i);
		if (!sram_np)
			return -EINVAL;

		if (!of_device_is_available(sram_np)) {
			of_node_put(sram_np);
			return -EINVAL;
		}

		ret = of_address_to_resource(sram_np, 0, &res);
		of_node_put(sram_np);
		if (ret)
			return -EINVAL;

		core->sram[i].bus_addr = res.start;
		core->sram[i].dev_addr = res.start;
		core->sram[i].size = resource_size(&res);
		core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
							 resource_size(&res));
		if (!core->sram[i].cpu_addr) {
			dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
				i, &res.start);
			return -ENOMEM;
		}

		dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
			i, &core->sram[i].bus_addr,
			core->sram[i].size, core->sram[i].cpu_addr,
			core->sram[i].dev_addr);
	}
	core->num_sram = num_sram;

	return 0;
}

static
struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev,
					  const struct ti_sci_handle *sci)
{
	struct ti_sci_proc *tsp;
	u32 temp[2];
	int ret;

	ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
					 temp, 2);
	if (ret < 0)
		return ERR_PTR(ret);

	tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
	if (!tsp)
		return ERR_PTR(-ENOMEM);

	tsp->dev = dev;
	tsp->sci = sci;
	tsp->ops = &sci->ops.proc_ops;
	tsp->proc_id = temp[0];
	tsp->host_id = temp[1];

	return tsp;
}

static int k3_r5_core_of_init(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev_of_node(dev);
	struct k3_r5_core *core;
	int ret;

	if (!devres_open_group(dev, k3_r5_core_of_init, GFP_KERNEL))
		return -ENOMEM;

	core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
	if (!core) {
		ret = -ENOMEM;
		goto err;
	}

	core->dev = dev;
	/*
	 * Use SoC Power-on-Reset values as default if no DT properties are
	 * used to dictate the TCM configurations
	 */
	core->atcm_enable = 0;
	core->btcm_enable = 1;
	core->loczrama = 1;

	ret = of_property_read_u32(np, "ti,atcm-enable", &core->atcm_enable);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(dev, "invalid format for ti,atcm-enable, ret = %d\n",
			ret);
		goto err;
	}

	ret = of_property_read_u32(np, "ti,btcm-enable", &core->btcm_enable);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(dev, "invalid format for ti,btcm-enable, ret = %d\n",
			ret);
		goto err;
	}

	ret = of_property_read_u32(np, "ti,loczrama", &core->loczrama);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(dev, "invalid format for ti,loczrama, ret = %d\n", ret);
		goto err;
	}

	core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(core->ti_sci)) {
		ret = PTR_ERR(core->ti_sci);
		if (ret != -EPROBE_DEFER) {
			dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
				ret);
		}
		core->ti_sci = NULL;
		goto err;
	}

	ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id);
	if (ret) {
		dev_err(dev, "missing 'ti,sci-dev-id' property\n");
		goto err;
	}

	core->reset = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR_OR_NULL(core->reset)) {
		ret = PTR_ERR_OR_ZERO(core->reset);
		if (!ret)
			ret = -ENODEV;
		if (ret != -EPROBE_DEFER) {
			dev_err(dev, "failed to get reset handle, ret = %d\n",
				ret);
		}
		goto err;
	}

	core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
	if (IS_ERR(core->tsp)) {
		ret = PTR_ERR(core->tsp);
		dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
			ret);
		goto err;
	}

	ret = k3_r5_core_of_get_internal_memories(pdev, core);
	if (ret) {
		dev_err(dev, "failed to get internal memories, ret = %d\n",
			ret);
		goto err;
	}

	ret = k3_r5_core_of_get_sram_memories(pdev, core);
	if (ret) {
		dev_err(dev, "failed to get sram memories, ret = %d\n", ret);
		goto err;
	}

	ret = ti_sci_proc_request(core->tsp);
	if (ret < 0) {
		dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
		goto err;
	}

	platform_set_drvdata(pdev, core);
	devres_close_group(dev, k3_r5_core_of_init);

	return 0;

err:
	devres_release_group(dev, k3_r5_core_of_init);
	return ret;
}
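
/*
 * free the resources explicitly since driver model is not being used
 * for the child R5F devices
 */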
static void k3_r5_core_of_exit(struct platform_device *pdev)
{
	struct k3_r5_core *core = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	int ret;

	ret = ti_sci_proc_release(core->tsp);
	if (ret)
		dev_err(dev, "failed to release proc, ret = %d\n", ret);

	platform_set_drvdata(pdev, NULL);
	devres_release_group(dev, k3_r5_core_of_init);
}

static void k3_r5_cluster_of_exit(void *data)
{
	struct k3_r5_cluster *cluster = platform_get_drvdata(data);
	struct platform_device *cpdev;
	struct k3_r5_core *core, *temp;

	list_for_each_entry_safe_reverse(core, temp, &cluster->cores, elem) {
		list_del(&core->elem);
		cpdev = to_platform_device(core->dev);
		k3_r5_core_of_exit(cpdev);
	}
}

static int k3_r5_cluster_of_init(struct platform_device *pdev)
{
	struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	struct device_node *np = dev_of_node(dev);
	struct platform_device *cpdev;
	struct device_node *child;
	struct k3_r5_core *core;
	int ret;

	for_each_available_child_of_node(np, child) {
		cpdev = of_find_device_by_node(child);
		if (!cpdev) {
			ret = -ENODEV;
			dev_err(dev, "could not get R5 core platform device\n");
			of_node_put(child);
			goto fail;
		}

		ret = k3_r5_core_of_init(cpdev);
		if (ret) {
			dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
				ret);
			put_device(&cpdev->dev);
			of_node_put(child);
			goto fail;
		}

		core = platform_get_drvdata(cpdev);
		put_device(&cpdev->dev);
		list_add_tail(&core->elem, &cluster->cores);
	}

	return 0;

fail:
	k3_r5_cluster_of_exit(pdev);
	return ret;
}

static int k3_r5_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev_of_node(dev);
	struct k3_r5_cluster *cluster;
	const struct k3_r5_soc_data *data;
	int ret;
	int num_cores;

	data = of_device_get_match_data(&pdev->dev);
	if (!data) {
		dev_err(dev, "SoC-specific data is not defined\n");
		return -ENODEV;
	}

	cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
	if (!cluster)
		return -ENOMEM;

	cluster->dev = dev;
	/*
	 * default to the most common efficient usage of the cores: LockStep
	 * mode on SoCs supporting it, and Split mode on SoCs that only support
	 * Single-CPU mode as the LockStep alternative
	 */
	cluster->mode = data->single_cpu_mode ?
				CLUSTER_MODE_SPLIT : CLUSTER_MODE_LOCKSTEP;
	cluster->soc_data = data;
	INIT_LIST_HEAD(&cluster->cores);

	ret = of_property_read_u32(np, "ti,cluster-mode", (u32 *)&cluster->mode);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n",
			ret);
		return ret;
	}

	num_cores = of_get_available_child_count(np);
	if (num_cores != 2) {
		dev_err(dev, "MCU cluster requires both R5F cores to be enabled, num_cores = %d\n",
			num_cores);
		return -ENODEV;
	}

	platform_set_drvdata(pdev, cluster);

	ret = devm_of_platform_populate(dev);
	if (ret) {
		dev_err(dev, "devm_of_platform_populate failed, ret = %d\n",
			ret);
		return ret;
	}

	ret = k3_r5_cluster_of_init(pdev);
	if (ret) {
		dev_err(dev, "k3_r5_cluster_of_init failed, ret = %d\n", ret);
		return ret;
	}

	ret = devm_add_action_or_reset(dev, k3_r5_cluster_of_exit, pdev);
	if (ret)
		return ret;

	ret = k3_r5_cluster_rproc_init(pdev);
	if (ret) {
		dev_err(dev, "k3_r5_cluster_rproc_init failed, ret = %d\n",
			ret);
		return ret;
	}

	ret = devm_add_action_or_reset(dev, k3_r5_cluster_rproc_exit, pdev);
	if (ret)
		return ret;

	return 0;
}

static const struct k3_r5_soc_data am65_j721e_soc_data = {
	.tcm_is_double = false,
	.tcm_ecc_autoinit = false,
	.single_cpu_mode = false,
};

static const struct k3_r5_soc_data j7200_j721s2_soc_data = {
	.tcm_is_double = true,
	.tcm_ecc_autoinit = true,
	.single_cpu_mode = false,
};

static const struct k3_r5_soc_data am64_soc_data = {
	.tcm_is_double = true,
	.tcm_ecc_autoinit = true,
	.single_cpu_mode = true,
};

static const struct of_device_id k3_r5_of_match[] = {
	{ .compatible = "ti,am654-r5fss", .data = &am65_j721e_soc_data, },
	{ .compatible = "ti,j721e-r5fss", .data = &am65_j721e_soc_data, },
	{ .compatible = "ti,j7200-r5fss", .data = &j7200_j721s2_soc_data, },
	{ .compatible = "ti,am64-r5fss", .data = &am64_soc_data, },
	{ .compatible = "ti,j721s2-r5fss", .data = &j7200_j721s2_soc_data, },
	{ },
};
MODULE_DEVICE_TABLE(of, k3_r5_of_match);

static struct platform_driver k3_r5_rproc_driver = {
	.probe = k3_r5_probe,
	.driver = {
		.name = "k3_r5_rproc",
		.of_match_table = k3_r5_of_match,
	},
};

module_platform_driver(k3_r5_rproc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI K3 R5F remote processor driver");
MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");