0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017 #define pr_fmt(fmt) "%s: " fmt, __func__
0018
0019 #include <linux/delay.h>
0020 #include <linux/kernel.h>
0021 #include <linux/module.h>
0022 #include <linux/device.h>
0023 #include <linux/panic_notifier.h>
0024 #include <linux/slab.h>
0025 #include <linux/mutex.h>
0026 #include <linux/dma-map-ops.h>
0027 #include <linux/dma-mapping.h>
0028 #include <linux/dma-direct.h> /* XXX: pokes into bus_dma_range */
0029 #include <linux/firmware.h>
0030 #include <linux/string.h>
0031 #include <linux/debugfs.h>
0032 #include <linux/rculist.h>
0033 #include <linux/remoteproc.h>
0034 #include <linux/iommu.h>
0035 #include <linux/idr.h>
0036 #include <linux/elf.h>
0037 #include <linux/crc32.h>
0038 #include <linux/of_reserved_mem.h>
0039 #include <linux/virtio_ids.h>
0040 #include <linux/virtio_ring.h>
0041 #include <asm/byteorder.h>
0042 #include <linux/platform_device.h>
0043
0044 #include "remoteproc_internal.h"
0045
0046 #define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL
0047
0048 static DEFINE_MUTEX(rproc_list_mutex);
0049 static LIST_HEAD(rproc_list);
0050 static struct notifier_block rproc_panic_nb;
0051
0052 typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
0053 void *, int offset, int avail);
0054
0055 static int rproc_alloc_carveout(struct rproc *rproc,
0056 struct rproc_mem_entry *mem);
0057 static int rproc_release_carveout(struct rproc *rproc,
0058 struct rproc_mem_entry *mem);
0059
0060
0061 static DEFINE_IDA(rproc_dev_index);
0062 static struct workqueue_struct *rproc_recovery_wq;
0063
0064 static const char * const rproc_crash_names[] = {
0065 [RPROC_MMUFAULT] = "mmufault",
0066 [RPROC_WATCHDOG] = "watchdog",
0067 [RPROC_FATAL_ERROR] = "fatal error",
0068 };
0069
0070
0071 static const char *rproc_crash_to_string(enum rproc_crash_type type)
0072 {
0073 if (type < ARRAY_SIZE(rproc_crash_names))
0074 return rproc_crash_names[type];
0075 return "unknown";
0076 }
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086 static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
0087 unsigned long iova, int flags, void *token)
0088 {
0089 struct rproc *rproc = token;
0090
0091 dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);
0092
0093 rproc_report_crash(rproc, RPROC_MMUFAULT);
0094
0095
0096
0097
0098
0099 return -ENOSYS;
0100 }
0101
0102 static int rproc_enable_iommu(struct rproc *rproc)
0103 {
0104 struct iommu_domain *domain;
0105 struct device *dev = rproc->dev.parent;
0106 int ret;
0107
0108 if (!rproc->has_iommu) {
0109 dev_dbg(dev, "iommu not present\n");
0110 return 0;
0111 }
0112
0113 domain = iommu_domain_alloc(dev->bus);
0114 if (!domain) {
0115 dev_err(dev, "can't alloc iommu domain\n");
0116 return -ENOMEM;
0117 }
0118
0119 iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);
0120
0121 ret = iommu_attach_device(domain, dev);
0122 if (ret) {
0123 dev_err(dev, "can't attach iommu device: %d\n", ret);
0124 goto free_domain;
0125 }
0126
0127 rproc->domain = domain;
0128
0129 return 0;
0130
0131 free_domain:
0132 iommu_domain_free(domain);
0133 return ret;
0134 }
0135
0136 static void rproc_disable_iommu(struct rproc *rproc)
0137 {
0138 struct iommu_domain *domain = rproc->domain;
0139 struct device *dev = rproc->dev.parent;
0140
0141 if (!domain)
0142 return;
0143
0144 iommu_detach_device(domain, dev);
0145 iommu_domain_free(domain);
0146 }
0147
0148 phys_addr_t rproc_va_to_pa(void *cpu_addr)
0149 {
0150
0151
0152
0153
0154
0155 if (is_vmalloc_addr(cpu_addr)) {
0156 return page_to_phys(vmalloc_to_page(cpu_addr)) +
0157 offset_in_page(cpu_addr);
0158 }
0159
0160 WARN_ON(!virt_addr_valid(cpu_addr));
0161 return virt_to_phys(cpu_addr);
0162 }
0163 EXPORT_SYMBOL(rproc_va_to_pa);
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195 void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
0196 {
0197 struct rproc_mem_entry *carveout;
0198 void *ptr = NULL;
0199
0200 if (rproc->ops->da_to_va) {
0201 ptr = rproc->ops->da_to_va(rproc, da, len, is_iomem);
0202 if (ptr)
0203 goto out;
0204 }
0205
0206 list_for_each_entry(carveout, &rproc->carveouts, node) {
0207 int offset = da - carveout->da;
0208
0209
0210 if (!carveout->va)
0211 continue;
0212
0213
0214 if (offset < 0)
0215 continue;
0216
0217
0218 if (offset + len > carveout->len)
0219 continue;
0220
0221 ptr = carveout->va + offset;
0222
0223 if (is_iomem)
0224 *is_iomem = carveout->is_iomem;
0225
0226 break;
0227 }
0228
0229 out:
0230 return ptr;
0231 }
0232 EXPORT_SYMBOL(rproc_da_to_va);
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249
0250
0251
0252 __printf(2, 3)
0253 struct rproc_mem_entry *
0254 rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
0255 {
0256 va_list args;
0257 char _name[32];
0258 struct rproc_mem_entry *carveout, *mem = NULL;
0259
0260 if (!name)
0261 return NULL;
0262
0263 va_start(args, name);
0264 vsnprintf(_name, sizeof(_name), name, args);
0265 va_end(args);
0266
0267 list_for_each_entry(carveout, &rproc->carveouts, node) {
0268
0269 if (!strcmp(carveout->name, _name)) {
0270 mem = carveout;
0271 break;
0272 }
0273 }
0274
0275 return mem;
0276 }
0277
0278
0279
0280
0281
0282
0283
0284
0285
0286
0287
0288
0289
0290
0291
0292 static int rproc_check_carveout_da(struct rproc *rproc,
0293 struct rproc_mem_entry *mem, u32 da, u32 len)
0294 {
0295 struct device *dev = &rproc->dev;
0296 int delta;
0297
0298
0299 if (len > mem->len) {
0300 dev_err(dev, "Registered carveout doesn't fit len request\n");
0301 return -EINVAL;
0302 }
0303
0304 if (da != FW_RSC_ADDR_ANY && mem->da == FW_RSC_ADDR_ANY) {
0305
0306 return -EINVAL;
0307 } else if (da != FW_RSC_ADDR_ANY && mem->da != FW_RSC_ADDR_ANY) {
0308 delta = da - mem->da;
0309
0310
0311 if (delta < 0) {
0312 dev_err(dev,
0313 "Registered carveout doesn't fit da request\n");
0314 return -EINVAL;
0315 }
0316
0317 if (delta + len > mem->len) {
0318 dev_err(dev,
0319 "Registered carveout doesn't fit len request\n");
0320 return -EINVAL;
0321 }
0322 }
0323
0324 return 0;
0325 }
0326
/*
 * rproc_alloc_vring() - set up the memory and notify id for vring @i of @rvdev
 *
 * Reuses a pre-registered "vdev%dvring%d" carveout when one exists (after
 * checking it can hold the vring), otherwise registers a new carveout to be
 * allocated later by rproc_alloc_registered_carveouts().  Also allocates a
 * unique notify id for the vring and publishes it in the resource table so
 * the firmware can see it.
 */
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct rproc_vring *rvring = &rvdev->vring[i];
	struct fw_rsc_vdev *rsc;
	int ret, notifyid;
	struct rproc_mem_entry *mem;
	size_t size;

	/* actual size of vring (in bytes) */
	size = PAGE_ALIGN(vring_size(rvring->num, rvring->align));

	/* locate this vdev's entry inside the installed resource table */
	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;

	/* Search for a pre-registered carveout for this vring */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  i);
	if (mem) {
		if (rproc_check_carveout_da(rproc, mem, rsc->vring[i].da, size))
			return -ENOMEM;
	} else {
		/* Register a carveout to be allocated later */
		mem = rproc_mem_entry_init(dev, NULL, 0,
					   size, rsc->vring[i].da,
					   rproc_alloc_carveout,
					   rproc_release_carveout,
					   "vdev%dvring%d",
					   rvdev->index, i);
		if (!mem) {
			dev_err(dev, "Can't allocate memory entry structure\n");
			return -ENOMEM;
		}

		rproc_add_carveout(rproc, mem);
	}

	/*
	 * Assign an rproc-wide unique index for this vring.  The id is later
	 * used by rproc_vq_interrupt() (via the notifyids idr) to find the
	 * vring to signal.
	 */
	ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		dev_err(dev, "idr_alloc failed: %d\n", ret);
		return ret;
	}
	notifyid = ret;

	/* Potentially bump max_notifyid */
	if (notifyid > rproc->max_notifyid)
		rproc->max_notifyid = notifyid;

	rvring->notifyid = notifyid;

	/* Let the firmware know the notifyid of this vring */
	rsc->vring[i].notifyid = notifyid;
	return 0;
}
0386
0387 static int
0388 rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
0389 {
0390 struct rproc *rproc = rvdev->rproc;
0391 struct device *dev = &rproc->dev;
0392 struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
0393 struct rproc_vring *rvring = &rvdev->vring[i];
0394
0395 dev_dbg(dev, "vdev rsc: vring%d: da 0x%x, qsz %d, align %d\n",
0396 i, vring->da, vring->num, vring->align);
0397
0398
0399 if (!vring->num || !vring->align) {
0400 dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
0401 vring->num, vring->align);
0402 return -EINVAL;
0403 }
0404
0405 rvring->num = vring->num;
0406 rvring->align = vring->align;
0407 rvring->rvdev = rvdev;
0408
0409 return 0;
0410 }
0411
/*
 * Release a vring's notify id and, when a resource table is still
 * installed, clear the corresponding vring entry so stale addresses are
 * not left behind for a subsequent boot.
 */
void rproc_free_vring(struct rproc_vring *rvring)
{
	struct rproc *rproc = rvring->rvdev->rproc;
	int idx = rvring - rvring->rvdev->vring;	/* index of this vring in rvdev->vring[] */
	struct fw_rsc_vdev *rsc;

	idr_remove(&rproc->notifyids, rvring->notifyid);

	/*
	 * The installed resource table may no longer be accessible at this
	 * point (e.g. after the remote processor has been stopped), in which
	 * case rproc->table_ptr is either the cached table or NULL.
	 * NOTE(review): this relies on table_ptr being NULL once no table is
	 * available — confirm against the stop/detach paths.  Only reset the
	 * vdev section when there is a table to work with.
	 */
	if (rproc->table_ptr) {
		rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
		rsc->vring[idx].da = 0;
		rsc->vring[idx].notifyid = -1;
	}
}
0437
/* Subdev start hook: register the virtio device backing this vdev. */
static int rproc_vdev_do_start(struct rproc_subdev *subdev)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);

	return rproc_add_virtio_dev(rvdev, rvdev->id);
}
0444
/* Subdev stop hook: remove all virtio child devices of this vdev. */
static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
	int ret;

	ret = device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev);
	if (ret)
		dev_warn(&rvdev->dev, "can't remove vdev child device: %d\n", ret);
}
0454
0455
0456
0457
0458
0459
/*
 * Device release callback for the rvdev device: drop its reserved-memory
 * association and DMA coherent pool before freeing the structure itself.
 */
static void rproc_rvdev_release(struct device *dev)
{
	struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev);

	of_reserved_mem_device_release(dev);
	dma_release_coherent_memory(dev);

	kfree(rvdev);
}
0469
0470 static int copy_dma_range_map(struct device *to, struct device *from)
0471 {
0472 const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
0473 int num_ranges = 0;
0474
0475 if (!map)
0476 return 0;
0477
0478 for (r = map; r->size; r++)
0479 num_ranges++;
0480
0481 new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
0482 GFP_KERNEL);
0483 if (!new_map)
0484 return -ENOMEM;
0485 to->dma_range_map = new_map;
0486 return 0;
0487 }
0488
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
0499
0500
0501
0502
0503
0504
0505
0506
0507
0508
0509
0510
0511
0512
0513
0514
0515
0516
/*
 * rproc_handle_vdev() - handle a vdev fw resource
 *
 * Validates the vdev resource entry, creates an rproc_vdev with its own
 * struct device (so it can carry DMA configuration copied from the rproc's
 * parent), parses and allocates its vrings, and registers the vdev as a
 * subdevice so it is started/stopped with the remote processor.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
			     int offset, int avail)
{
	struct fw_rsc_vdev *rsc = ptr;
	struct device *dev = &rproc->dev;
	struct rproc_vdev *rvdev;
	int i, ret;
	char name[16];

	/* make sure resource isn't truncated */
	if (struct_size(rsc, vring, rsc->num_of_vrings) + rsc->config_len >
			avail) {
		dev_err(dev, "vdev rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zero, for future compatibility */
	if (rsc->reserved[0] || rsc->reserved[1]) {
		dev_err(dev, "vdev rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "vdev rsc: id %d, dfeatures 0x%x, cfg len %d, %d vrings\n",
		rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);

	/* we currently support only two vrings per rvdev */
	if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) {
		dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings);
		return -EINVAL;
	}

	rvdev = kzalloc(sizeof(*rvdev), GFP_KERNEL);
	if (!rvdev)
		return -ENOMEM;

	kref_init(&rvdev->refcount);

	rvdev->id = rsc->id;
	rvdev->rproc = rproc;
	rvdev->index = rproc->nb_vdev++;

	/* Initialise vdev subdevice */
	snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index);
	rvdev->dev.parent = &rproc->dev;
	rvdev->dev.release = rproc_rvdev_release;
	dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name);
	dev_set_drvdata(&rvdev->dev, rvdev);

	ret = device_register(&rvdev->dev);
	if (ret) {
		/* after device_register(), the device must be put, not freed */
		put_device(&rvdev->dev);
		return ret;
	}

	ret = copy_dma_range_map(&rvdev->dev, rproc->dev.parent);
	if (ret)
		goto free_rvdev;

	/* Make device dma capable by inheriting from parent's capabilities */
	set_dma_ops(&rvdev->dev, get_dma_ops(rproc->dev.parent));

	ret = dma_coerce_mask_and_coherent(&rvdev->dev,
					   dma_get_mask(rproc->dev.parent));
	if (ret) {
		/* non-fatal: keep going with whatever mask is in place */
		dev_warn(dev,
			 "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
			 dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
	}

	/* parse the vrings */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_parse_vring(rvdev, rsc, i);
		if (ret)
			goto free_rvdev;
	}

	/* remember the resource offset */
	rvdev->rsc_offset = offset;

	/* allocate the vring resources */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_alloc_vring(rvdev, i);
		if (ret)
			goto unwind_vring_allocations;
	}

	list_add_tail(&rvdev->node, &rproc->rvdevs);

	rvdev->subdev.start = rproc_vdev_do_start;
	rvdev->subdev.stop = rproc_vdev_do_stop;

	rproc_add_subdev(rproc, &rvdev->subdev);

	return 0;

unwind_vring_allocations:
	for (i--; i >= 0; i--)
		rproc_free_vring(&rvdev->vring[i]);
free_rvdev:
	/* unregistering drops the last reference; release frees rvdev */
	device_unregister(&rvdev->dev);
	return ret;
}
0619
/*
 * Final kref release for an rproc_vdev: free all of its vrings, remove it
 * from the subdevice and rvdev lists, and unregister its device (which
 * frees the structure via rproc_rvdev_release()).
 */
void rproc_vdev_release(struct kref *ref)
{
	struct rproc_vdev *rvdev = container_of(ref, struct rproc_vdev, refcount);
	struct rproc_vring *rvring;
	struct rproc *rproc = rvdev->rproc;
	int id;

	for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
		rvring = &rvdev->vring[id];
		rproc_free_vring(rvring);
	}

	rproc_remove_subdev(rproc, &rvdev->subdev);
	list_del(&rvdev->node);
	device_unregister(&rvdev->dev);
}
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654 static int rproc_handle_trace(struct rproc *rproc, void *ptr,
0655 int offset, int avail)
0656 {
0657 struct fw_rsc_trace *rsc = ptr;
0658 struct rproc_debug_trace *trace;
0659 struct device *dev = &rproc->dev;
0660 char name[15];
0661
0662 if (sizeof(*rsc) > avail) {
0663 dev_err(dev, "trace rsc is truncated\n");
0664 return -EINVAL;
0665 }
0666
0667
0668 if (rsc->reserved) {
0669 dev_err(dev, "trace rsc has non zero reserved bytes\n");
0670 return -EINVAL;
0671 }
0672
0673 trace = kzalloc(sizeof(*trace), GFP_KERNEL);
0674 if (!trace)
0675 return -ENOMEM;
0676
0677
0678 trace->trace_mem.len = rsc->len;
0679 trace->trace_mem.da = rsc->da;
0680
0681
0682 trace->rproc = rproc;
0683
0684
0685 snprintf(name, sizeof(name), "trace%d", rproc->num_traces);
0686
0687
0688 trace->tfile = rproc_create_trace_file(name, rproc, trace);
0689
0690 list_add_tail(&trace->node, &rproc->traces);
0691
0692 rproc->num_traces++;
0693
0694 dev_dbg(dev, "%s added: da 0x%x, len 0x%x\n",
0695 name, rsc->da, rsc->len);
0696
0697 return 0;
0698 }
0699
0700
0701
0702
0703
0704
0705
0706
0707
0708
0709
0710
0711
0712
0713
0714
0715
0716
0717
0718
0719
0720
0721
0722
0723
0724
0725
0726
0727
0728 static int rproc_handle_devmem(struct rproc *rproc, void *ptr,
0729 int offset, int avail)
0730 {
0731 struct fw_rsc_devmem *rsc = ptr;
0732 struct rproc_mem_entry *mapping;
0733 struct device *dev = &rproc->dev;
0734 int ret;
0735
0736
0737 if (!rproc->domain)
0738 return -EINVAL;
0739
0740 if (sizeof(*rsc) > avail) {
0741 dev_err(dev, "devmem rsc is truncated\n");
0742 return -EINVAL;
0743 }
0744
0745
0746 if (rsc->reserved) {
0747 dev_err(dev, "devmem rsc has non zero reserved bytes\n");
0748 return -EINVAL;
0749 }
0750
0751 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
0752 if (!mapping)
0753 return -ENOMEM;
0754
0755 ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
0756 if (ret) {
0757 dev_err(dev, "failed to map devmem: %d\n", ret);
0758 goto out;
0759 }
0760
0761
0762
0763
0764
0765
0766
0767
0768 mapping->da = rsc->da;
0769 mapping->len = rsc->len;
0770 list_add_tail(&mapping->node, &rproc->mappings);
0771
0772 dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
0773 rsc->pa, rsc->da, rsc->len);
0774
0775 return 0;
0776
0777 out:
0778 kfree(mapping);
0779 return ret;
0780 }
0781
0782
0783
0784
0785
0786
0787
0788
0789
0790
0791
/*
 * rproc_alloc_carveout() - allocate DMA memory backing a carveout entry
 *
 * Allocates coherent DMA memory for @mem and, when the rproc is behind an
 * IOMMU and a fixed device address was requested, maps the allocation at
 * that address.  When no device address was requested (FW_RSC_ADDR_ANY),
 * the DMA address (truncated to 32 bits) is published as the da.
 */
static int rproc_alloc_carveout(struct rproc *rproc,
				struct rproc_mem_entry *mem)
{
	struct rproc_mem_entry *mapping = NULL;
	struct device *dev = &rproc->dev;
	dma_addr_t dma;
	void *va;
	int ret;

	va = dma_alloc_coherent(dev->parent, mem->len, &dma, GFP_KERNEL);
	if (!va) {
		dev_err(dev->parent,
			"failed to allocate dma memory: len 0x%zx\n",
			mem->len);
		return -ENOMEM;
	}

	dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%zx\n",
		va, &dma, mem->len);

	if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) {
		/*
		 * Without an IOMMU the device address must equal the DMA
		 * address; a mismatch cannot be corrected here, so only
		 * warn and carry on.
		 */
		if (mem->da != (u32)dma)
			dev_warn(dev->parent,
				 "Allocated carveout doesn't fit device address request\n");
	}

	/*
	 * When the rproc sits behind an IOMMU and the firmware requested a
	 * specific device address, map the allocation there and remember
	 * the mapping so rproc_resource_cleanup() can undo it.
	 */
	if (mem->da != FW_RSC_ADDR_ANY && rproc->domain) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto dma_free;
		}

		ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
				mem->flags);
		if (ret) {
			dev_err(dev, "iommu_map failed: %d\n", ret);
			goto free_mapping;
		}

		/* track da/len so the mapping can be unmapped on cleanup */
		mapping->da = mem->da;
		mapping->len = mem->len;
		list_add_tail(&mapping->node, &rproc->mappings);

		dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
			mem->da, &dma);
	}

	if (mem->da == FW_RSC_ADDR_ANY) {
		/* the resource table stores a 32-bit device address */
		if ((u64)dma & HIGH_BITS_MASK)
			dev_warn(dev, "DMA address cast in 32bit to fit resource table format\n");

		mem->da = (u32)dma;
	}

	mem->dma = dma;
	mem->va = va;

	return 0;

free_mapping:
	kfree(mapping);
dma_free:
	dma_free_coherent(dev->parent, mem->len, va, dma);
	return ret;
}
0889
0890
0891
0892
0893
0894
0895
0896
0897
0898
0899
0900 static int rproc_release_carveout(struct rproc *rproc,
0901 struct rproc_mem_entry *mem)
0902 {
0903 struct device *dev = &rproc->dev;
0904
0905
0906 dma_free_coherent(dev->parent, mem->len, mem->va, mem->dma);
0907 return 0;
0908 }
0909
0910
0911
0912
0913
0914
0915
0916
0917
0918
0919
0920
0921
0922
0923
0924
0925
0926
0927
0928
0929
0930
/*
 * rproc_handle_carveout() - handle a physically-contiguous memory resource
 *
 * If a carveout with a matching name was pre-registered by the platform
 * driver, associate it with this resource-table entry (after checking it
 * can satisfy the da/len request).  Otherwise register a new carveout to
 * be allocated later by rproc_alloc_registered_carveouts().
 */
static int rproc_handle_carveout(struct rproc *rproc,
				 void *ptr, int offset, int avail)
{
	struct fw_rsc_carveout *rsc = ptr;
	struct rproc_mem_entry *carveout;
	struct device *dev = &rproc->dev;

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "carveout rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zero, for future compatibility */
	if (rsc->reserved) {
		dev_err(dev, "carveout rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n",
		rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags);

	/*
	 * Check whether the platform driver registered a carveout with the
	 * same name as this resource entry.
	 */
	carveout = rproc_find_carveout_by_name(rproc, rsc->name);

	if (carveout) {
		/* a carveout may back at most one resource-table entry */
		if (carveout->rsc_offset != FW_RSC_ADDR_ANY) {
			dev_err(dev,
				"Carveout already associated to resource table\n");
			return -ENOMEM;
		}

		if (rproc_check_carveout_da(rproc, carveout, rsc->da, rsc->len))
			return -ENOMEM;

		/* Update memory carveout with resource-table info */
		carveout->rsc_offset = offset;
		carveout->flags = rsc->flags;

		return 0;
	}

	/* Register carveout in list to be allocated later */
	carveout = rproc_mem_entry_init(dev, NULL, 0, rsc->len, rsc->da,
					rproc_alloc_carveout,
					rproc_release_carveout, rsc->name);
	if (!carveout) {
		dev_err(dev, "Can't allocate memory entry structure\n");
		return -ENOMEM;
	}

	carveout->flags = rsc->flags;
	carveout->rsc_offset = offset;
	rproc_add_carveout(rproc, carveout);

	return 0;
}
0990
0991
0992
0993
0994
0995
0996
0997
0998
/*
 * rproc_add_carveout() - register a carveout with the rproc
 *
 * Appends @mem to the rproc's carveout list; allocation (if any) happens
 * later, when registered carveouts are processed.
 */
void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem)
{
	list_add_tail(&mem->node, &rproc->carveouts);
}
EXPORT_SYMBOL(rproc_add_carveout);
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021 __printf(8, 9)
1022 struct rproc_mem_entry *
1023 rproc_mem_entry_init(struct device *dev,
1024 void *va, dma_addr_t dma, size_t len, u32 da,
1025 int (*alloc)(struct rproc *, struct rproc_mem_entry *),
1026 int (*release)(struct rproc *, struct rproc_mem_entry *),
1027 const char *name, ...)
1028 {
1029 struct rproc_mem_entry *mem;
1030 va_list args;
1031
1032 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
1033 if (!mem)
1034 return mem;
1035
1036 mem->va = va;
1037 mem->dma = dma;
1038 mem->da = da;
1039 mem->len = len;
1040 mem->alloc = alloc;
1041 mem->release = release;
1042 mem->rsc_offset = FW_RSC_ADDR_ANY;
1043 mem->of_resm_idx = -1;
1044
1045 va_start(args, name);
1046 vsnprintf(mem->name, sizeof(mem->name), name, args);
1047 va_end(args);
1048
1049 return mem;
1050 }
1051 EXPORT_SYMBOL(rproc_mem_entry_init);
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067 __printf(5, 6)
1068 struct rproc_mem_entry *
1069 rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
1070 u32 da, const char *name, ...)
1071 {
1072 struct rproc_mem_entry *mem;
1073 va_list args;
1074
1075 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
1076 if (!mem)
1077 return mem;
1078
1079 mem->da = da;
1080 mem->len = len;
1081 mem->rsc_offset = FW_RSC_ADDR_ANY;
1082 mem->of_resm_idx = of_resm_idx;
1083
1084 va_start(args, name);
1085 vsnprintf(mem->name, sizeof(mem->name), name, args);
1086 va_end(args);
1087
1088 return mem;
1089 }
1090 EXPORT_SYMBOL(rproc_of_resm_mem_entry_init);
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105 int rproc_of_parse_firmware(struct device *dev, int index, const char **fw_name)
1106 {
1107 int ret;
1108
1109 ret = of_property_read_string_index(dev->of_node, "firmware-name",
1110 index, fw_name);
1111 return ret ? ret : 0;
1112 }
1113 EXPORT_SYMBOL(rproc_of_parse_firmware);
1114
1115
1116
1117
1118
/*
 * Dispatch table mapping resource-table entry types to their handlers,
 * used while processing resources during boot.
 */
static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
	[RSC_CARVEOUT] = rproc_handle_carveout,
	[RSC_DEVMEM] = rproc_handle_devmem,
	[RSC_TRACE] = rproc_handle_trace,
	[RSC_VDEV] = rproc_handle_vdev,
};
1125
1126
/*
 * Walk the resource table and invoke the matching handler for each entry.
 * Vendor-specific entries (RSC_VENDOR_START..RSC_VENDOR_END) are offered
 * to the platform driver via rproc_handle_rsc(); unknown types are skipped
 * with a warning.  Stops at the first handler error.
 */
static int rproc_handle_resources(struct rproc *rproc,
				  rproc_handle_resource_t handlers[RSC_LAST])
{
	struct device *dev = &rproc->dev;
	rproc_handle_resource_t handler;
	int ret = 0, i;

	if (!rproc->table_ptr)
		return 0;

	for (i = 0; i < rproc->table_ptr->num; i++) {
		int offset = rproc->table_ptr->offset[i];
		struct fw_rsc_hdr *hdr = (void *)rproc->table_ptr + offset;
		int avail = rproc->table_sz - offset - sizeof(*hdr);
		void *rsc = (void *)hdr + sizeof(*hdr);

		/* make sure table isn't truncated */
		if (avail < 0) {
			dev_err(dev, "rsc table is truncated\n");
			return -EINVAL;
		}

		dev_dbg(dev, "rsc: type %d\n", hdr->type);

		/* vendor-specific resources go to the platform driver */
		if (hdr->type >= RSC_VENDOR_START &&
		    hdr->type <= RSC_VENDOR_END) {
			ret = rproc_handle_rsc(rproc, hdr->type, rsc,
					       offset + sizeof(*hdr), avail);
			if (ret == RSC_HANDLED)
				continue;
			else if (ret < 0)
				break;

			dev_warn(dev, "unsupported vendor resource %d\n",
				 hdr->type);
			continue;
		}

		if (hdr->type >= RSC_LAST) {
			dev_warn(dev, "unsupported resource %d\n", hdr->type);
			continue;
		}

		handler = handlers[hdr->type];
		if (!handler)
			continue;

		ret = handler(rproc, rsc, offset + sizeof(*hdr), avail);
		if (ret)
			break;
	}

	return ret;
}
1181
1182 static int rproc_prepare_subdevices(struct rproc *rproc)
1183 {
1184 struct rproc_subdev *subdev;
1185 int ret;
1186
1187 list_for_each_entry(subdev, &rproc->subdevs, node) {
1188 if (subdev->prepare) {
1189 ret = subdev->prepare(subdev);
1190 if (ret)
1191 goto unroll_preparation;
1192 }
1193 }
1194
1195 return 0;
1196
1197 unroll_preparation:
1198 list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
1199 if (subdev->unprepare)
1200 subdev->unprepare(subdev);
1201 }
1202
1203 return ret;
1204 }
1205
1206 static int rproc_start_subdevices(struct rproc *rproc)
1207 {
1208 struct rproc_subdev *subdev;
1209 int ret;
1210
1211 list_for_each_entry(subdev, &rproc->subdevs, node) {
1212 if (subdev->start) {
1213 ret = subdev->start(subdev);
1214 if (ret)
1215 goto unroll_registration;
1216 }
1217 }
1218
1219 return 0;
1220
1221 unroll_registration:
1222 list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
1223 if (subdev->stop)
1224 subdev->stop(subdev, true);
1225 }
1226
1227 return ret;
1228 }
1229
1230 static void rproc_stop_subdevices(struct rproc *rproc, bool crashed)
1231 {
1232 struct rproc_subdev *subdev;
1233
1234 list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
1235 if (subdev->stop)
1236 subdev->stop(subdev, crashed);
1237 }
1238 }
1239
1240 static void rproc_unprepare_subdevices(struct rproc *rproc)
1241 {
1242 struct rproc_subdev *subdev;
1243
1244 list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
1245 if (subdev->unprepare)
1246 subdev->unprepare(subdev);
1247 }
1248 }
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261 static int rproc_alloc_registered_carveouts(struct rproc *rproc)
1262 {
1263 struct rproc_mem_entry *entry, *tmp;
1264 struct fw_rsc_carveout *rsc;
1265 struct device *dev = &rproc->dev;
1266 u64 pa;
1267 int ret;
1268
1269 list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
1270 if (entry->alloc) {
1271 ret = entry->alloc(rproc, entry);
1272 if (ret) {
1273 dev_err(dev, "Unable to allocate carveout %s: %d\n",
1274 entry->name, ret);
1275 return -ENOMEM;
1276 }
1277 }
1278
1279 if (entry->rsc_offset != FW_RSC_ADDR_ANY) {
1280
1281 rsc = (void *)rproc->table_ptr + entry->rsc_offset;
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302 if (entry->va)
1303 pa = (u64)rproc_va_to_pa(entry->va);
1304 else
1305 pa = (u64)entry->dma;
1306
1307 if (((u64)pa) & HIGH_BITS_MASK)
1308 dev_warn(dev,
1309 "Physical address cast in 32bit to fit resource table format\n");
1310
1311 rsc->pa = (u32)pa;
1312 rsc->da = entry->da;
1313 rsc->len = entry->len;
1314 }
1315 }
1316
1317 return 0;
1318 }
1319
1320
1321
1322
1323
1324
1325
1326
1327
/*
 * rproc_resource_cleanup() - clean up and free all acquired resources
 *
 * Tears down, in order: debugfs trace files, IOMMU mappings, carveouts
 * (via their ->release() callbacks), virtio devices (by dropping their
 * krefs), and coredump state.
 */
void rproc_resource_cleanup(struct rproc *rproc)
{
	struct rproc_mem_entry *entry, *tmp;
	struct rproc_debug_trace *trace, *ttmp;
	struct rproc_vdev *rvdev, *rvtmp;
	struct device *dev = &rproc->dev;

	/* clean up debugfs trace entries */
	list_for_each_entry_safe(trace, ttmp, &rproc->traces, node) {
		rproc_remove_trace_file(trace->tfile);
		rproc->num_traces--;
		list_del(&trace->node);
		kfree(trace);
	}

	/* clean up iommu mapping entries */
	list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
		size_t unmapped;

		unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
		if (unmapped != entry->len) {
			/* nothing to recover here; just note the mismatch */
			dev_err(dev, "failed to unmap %zx/%zu\n", entry->len,
				unmapped);
		}

		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up carveout allocations */
	list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
		if (entry->release)
			entry->release(rproc, entry);
		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up remote vdev entries */
	list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
		kref_put(&rvdev->refcount, rproc_vdev_release);

	rproc_coredump_cleanup(rproc);
}
EXPORT_SYMBOL(rproc_resource_cleanup);
1373
/*
 * rproc_start() - load the firmware and start the remote processor
 *
 * Loads the firmware segments, installs the (cached) resource table into
 * the loaded image's table location when one exists, prepares and starts
 * all subdevices, and finally flips the rproc state to RPROC_RUNNING.
 * On any failure the steps already taken are unwound in reverse order.
 */
static int rproc_start(struct rproc *rproc, const struct firmware *fw)
{
	struct resource_table *loaded_table;
	struct device *dev = &rproc->dev;
	int ret;

	/* load the ELF segments to memory */
	ret = rproc_load_segments(rproc, fw);
	if (ret) {
		dev_err(dev, "Failed to load program segments: %d\n", ret);
		return ret;
	}

	/*
	 * The starting device has been given the rproc->cached_table as the
	 * resource table.  If a loaded table location exists in the image,
	 * copy the cached table into it and from now on use the loaded
	 * table directly, so that any changes made here are visible to the
	 * remote processor.
	 */
	loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
	if (loaded_table) {
		memcpy(loaded_table, rproc->cached_table, rproc->table_sz);
		rproc->table_ptr = loaded_table;
	}

	ret = rproc_prepare_subdevices(rproc);
	if (ret) {
		dev_err(dev, "failed to prepare subdevices for %s: %d\n",
			rproc->name, ret);
		goto reset_table_ptr;
	}

	/* power up the remote processor */
	ret = rproc->ops->start(rproc);
	if (ret) {
		dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
		goto unprepare_subdevices;
	}

	/* Start any subdevices for the remote processor */
	ret = rproc_start_subdevices(rproc);
	if (ret) {
		dev_err(dev, "failed to probe subdevices for %s: %d\n",
			rproc->name, ret);
		goto stop_rproc;
	}

	rproc->state = RPROC_RUNNING;

	dev_info(dev, "remote processor %s is now up\n", rproc->name);

	return 0;

stop_rproc:
	rproc->ops->stop(rproc);
unprepare_subdevices:
	rproc_unprepare_subdevices(rproc);
reset_table_ptr:
	rproc->table_ptr = rproc->cached_table;

	return ret;
}
1438
/*
 * __rproc_attach() - attach to a remote processor that is already running
 *
 * Prepares the subdevices, attaches to the (externally booted) remote
 * processor, starts the subdevices, and sets the state to RPROC_ATTACHED.
 * On failure the steps taken so far are unwound in reverse order.
 */
static int __rproc_attach(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = rproc_prepare_subdevices(rproc);
	if (ret) {
		dev_err(dev, "failed to prepare subdevices for %s: %d\n",
			rproc->name, ret);
		goto out;
	}

	/* Attach to the remote processor */
	ret = rproc_attach_device(rproc);
	if (ret) {
		dev_err(dev, "can't attach to rproc %s: %d\n",
			rproc->name, ret);
		goto unprepare_subdevices;
	}

	/* Start any subdevices for the remote processor */
	ret = rproc_start_subdevices(rproc);
	if (ret) {
		dev_err(dev, "failed to probe subdevices for %s: %d\n",
			rproc->name, ret);
		goto stop_rproc;
	}

	rproc->state = RPROC_ATTACHED;

	dev_info(dev, "remote processor %s is now attached\n", rproc->name);

	return 0;

stop_rproc:
	rproc->ops->stop(rproc);
unprepare_subdevices:
	rproc_unprepare_subdevices(rproc);
out:
	return ret;
}
1480
1481
1482
1483
/*
 * rproc_fw_boot() - take the rproc from offline to running using @fw
 *
 * Sequence: sanity-check the image, enable the IOMMU (if any), prepare
 * the device, determine the boot address, parse the firmware's resource
 * table, handle all resource entries, allocate the registered carveouts,
 * and finally start the processor.  Each failure unwinds the earlier
 * steps via the labels at the bottom.
 */
static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
{
	struct device *dev = &rproc->dev;
	const char *name = rproc->firmware;
	int ret;

	ret = rproc_fw_sanity_check(rproc, fw);
	if (ret)
		return ret;

	dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);

	/*
	 * If enabling an IOMMU isn't relevant for this rproc, this is
	 * just a no-op.
	 */
	ret = rproc_enable_iommu(rproc);
	if (ret) {
		dev_err(dev, "can't enable iommu: %d\n", ret);
		return ret;
	}

	/* Prepare rproc for firmware loading if needed */
	ret = rproc_prepare_device(rproc);
	if (ret) {
		dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret);
		goto disable_iommu;
	}

	rproc->bootaddr = rproc_get_boot_addr(rproc, fw);

	/* Load resource table, core dump segment list etc from the firmware */
	ret = rproc_parse_fw(rproc, fw);
	if (ret)
		goto unprepare_rproc;

	/* reset max_notifyid */
	rproc->max_notifyid = -1;

	/* reset handled vdev */
	rproc->nb_vdev = 0;

	/* handle firmware resource entries before booting the remote processor */
	ret = rproc_handle_resources(rproc, rproc_loading_handlers);
	if (ret) {
		dev_err(dev, "Failed to process resources: %d\n", ret);
		goto clean_up_resources;
	}

	/* Allocate carveout resources associated to rproc */
	ret = rproc_alloc_registered_carveouts(rproc);
	if (ret) {
		dev_err(dev, "Failed to allocate associated carveouts: %d\n",
			ret);
		goto clean_up_resources;
	}

	ret = rproc_start(rproc, fw);
	if (ret)
		goto clean_up_resources;

	return 0;

clean_up_resources:
	rproc_resource_cleanup(rproc);
	kfree(rproc->cached_table);
	rproc->cached_table = NULL;
	rproc->table_ptr = NULL;
unprepare_rproc:
	/* release HW resources if needed */
	rproc_unprepare_device(rproc);
disable_iommu:
	rproc_disable_iommu(rproc);
	return ret;
}
1559
/*
 * rproc_set_rsc_table() - adopt the resource table published by an
 * already-running remote processor (attach path).
 *
 * When the platform supports detach, a pristine copy of the table is kept
 * (clean_table) so the original can be restored at detach time.
 */
static int rproc_set_rsc_table(struct rproc *rproc)
{
	struct resource_table *table_ptr;
	struct device *dev = &rproc->dev;
	size_t table_sz;
	int ret;

	table_ptr = rproc_get_loaded_rsc_table(rproc, &table_sz);
	if (!table_ptr) {
		/* not having a resource table is acceptable */
		return 0;
	}

	if (IS_ERR(table_ptr)) {
		ret = PTR_ERR(table_ptr);
		dev_err(dev, "can't load resource table: %d\n", ret);
		return ret;
	}

	/*
	 * If it is possible to detach the remote processor, keep an
	 * untouched copy of the resource table.  That way it can be restored
	 * at detach time, leaving the table in the same state the remote
	 * processor found it in when it was first attached to.
	 */
	if (rproc->ops->detach) {
		rproc->clean_table = kmemdup(table_ptr, table_sz, GFP_KERNEL);
		if (!rproc->clean_table)
			return -ENOMEM;
	} else {
		rproc->clean_table = NULL;
	}

	rproc->cached_table = NULL;
	rproc->table_ptr = table_ptr;
	rproc->table_sz = table_sz;

	return 0;
}
1603
1604 static int rproc_reset_rsc_table_on_detach(struct rproc *rproc)
1605 {
1606 struct resource_table *table_ptr;
1607
1608
1609 if (!rproc->table_ptr)
1610 return 0;
1611
1612
1613
1614
1615
1616
1617 if (WARN_ON(!rproc->clean_table))
1618 return -EINVAL;
1619
1620
1621 table_ptr = rproc->table_ptr;
1622
1623
1624
1625
1626
1627
1628
1629
1630 rproc->cached_table = kmemdup(rproc->table_ptr,
1631 rproc->table_sz, GFP_KERNEL);
1632 if (!rproc->cached_table)
1633 return -ENOMEM;
1634
1635
1636
1637
1638
1639 rproc->table_ptr = rproc->cached_table;
1640
1641
1642
1643
1644
1645
1646 memcpy(table_ptr, rproc->clean_table, rproc->table_sz);
1647
1648
1649
1650
1651
1652 kfree(rproc->clean_table);
1653
1654 return 0;
1655 }
1656
1657 static int rproc_reset_rsc_table_on_stop(struct rproc *rproc)
1658 {
1659
1660 if (!rproc->table_ptr)
1661 return 0;
1662
1663
1664
1665
1666
1667
1668 if (rproc->cached_table)
1669 goto out;
1670
1671
1672
1673
1674
1675
1676
1677
1678 rproc->cached_table = kmemdup(rproc->table_ptr,
1679 rproc->table_sz, GFP_KERNEL);
1680 if (!rproc->cached_table)
1681 return -ENOMEM;
1682
1683
1684
1685
1686
1687 kfree(rproc->clean_table);
1688
1689 out:
1690
1691
1692
1693
1694 rproc->table_ptr = rproc->cached_table;
1695 return 0;
1696 }
1697
1698
1699
1700
1701
1702 static int rproc_attach(struct rproc *rproc)
1703 {
1704 struct device *dev = &rproc->dev;
1705 int ret;
1706
1707
1708
1709
1710
1711 ret = rproc_enable_iommu(rproc);
1712 if (ret) {
1713 dev_err(dev, "can't enable iommu: %d\n", ret);
1714 return ret;
1715 }
1716
1717
1718 ret = rproc_prepare_device(rproc);
1719 if (ret) {
1720 dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret);
1721 goto disable_iommu;
1722 }
1723
1724 ret = rproc_set_rsc_table(rproc);
1725 if (ret) {
1726 dev_err(dev, "can't load resource table: %d\n", ret);
1727 goto unprepare_device;
1728 }
1729
1730
1731 rproc->max_notifyid = -1;
1732
1733
1734 rproc->nb_vdev = 0;
1735
1736
1737
1738
1739
1740
1741 ret = rproc_handle_resources(rproc, rproc_loading_handlers);
1742 if (ret) {
1743 dev_err(dev, "Failed to process resources: %d\n", ret);
1744 goto unprepare_device;
1745 }
1746
1747
1748 ret = rproc_alloc_registered_carveouts(rproc);
1749 if (ret) {
1750 dev_err(dev, "Failed to allocate associated carveouts: %d\n",
1751 ret);
1752 goto clean_up_resources;
1753 }
1754
1755 ret = __rproc_attach(rproc);
1756 if (ret)
1757 goto clean_up_resources;
1758
1759 return 0;
1760
1761 clean_up_resources:
1762 rproc_resource_cleanup(rproc);
1763 unprepare_device:
1764
1765 rproc_unprepare_device(rproc);
1766 disable_iommu:
1767 rproc_disable_iommu(rproc);
1768 return ret;
1769 }
1770
1771
1772
1773
1774
1775
1776
1777
1778
/*
 * Completion handler for the asynchronous firmware request issued by
 * rproc_trigger_auto_boot(): boot the processor, then drop the image.
 */
static void rproc_auto_boot_callback(const struct firmware *fw, void *context)
{
	struct rproc *rp = context;

	/* Boot the remote processor; the result is deliberately ignored */
	rproc_boot(rp);

	/* The image was only requested to prime the firmware cache */
	release_firmware(fw);
}
1787
/* Boot (or attach to) an auto-boot remote processor at registration time */
static int rproc_trigger_auto_boot(struct rproc *rproc)
{
	int ret;

	/*
	 * Since the remote processor is in a detached state, it has already
	 * been booted by another entity.  As such there is no point in
	 * waiting for a firmware image to be loaded - attach immediately.
	 */
	if (rproc->state == RPROC_DETACHED)
		return rproc_boot(rproc);

	/*
	 * We're initiating an asynchronous firmware loading, so we can
	 * be built-in kernel code, without hanging the boot process.
	 */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
				      rproc->firmware, &rproc->dev, GFP_KERNEL,
				      rproc, rproc_auto_boot_callback);
	if (ret < 0)
		dev_err(&rproc->dev, "request_firmware_nowait err: %d\n", ret);

	return ret;
}
1813
/*
 * Power off a running remote processor.  @crashed indicates whether the
 * stop is part of crash recovery.  Callers in this file hold rproc->lock.
 */
static int rproc_stop(struct rproc *rproc, bool crashed)
{
	struct device *dev = &rproc->dev;
	int ret;

	/* No point going any further if we can't stop the processor */
	if (!rproc->ops->stop)
		return -EINVAL;

	/* Stop any subdevices for the remote processor */
	rproc_stop_subdevices(rproc, crashed);

	/* the installed resource table is no longer accessible once stopped */
	ret = rproc_reset_rsc_table_on_stop(rproc);
	if (ret) {
		dev_err(dev, "can't reset resource table: %d\n", ret);
		return ret;
	}


	/* power off the remote processor */
	ret = rproc->ops->stop(rproc);
	if (ret) {
		dev_err(dev, "can't stop rproc: %d\n", ret);
		return ret;
	}

	rproc_unprepare_subdevices(rproc);

	rproc->state = RPROC_OFFLINE;

	dev_info(dev, "stopped remote processor %s\n", rproc->name);

	return 0;
}
1849
1850
1851
1852
/*
 * __rproc_detach(): Does the opposite of __rproc_attach() - disconnect the
 * core from a remote processor while leaving it running.
 */
static int __rproc_detach(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	/* No point going any further if we can't detach */
	if (!rproc->ops->detach)
		return -EINVAL;

	/* Stop any subdevices for the remote processor */
	rproc_stop_subdevices(rproc, false);

	/* the installed resource table is no longer accessible after detach */
	ret = rproc_reset_rsc_table_on_detach(rproc);
	if (ret) {
		dev_err(dev, "can't reset resource table: %d\n", ret);
		return ret;
	}

	/* Tell the remote processor the core isn't available anymore */
	ret = rproc->ops->detach(rproc);
	if (ret) {
		dev_err(dev, "can't detach from rproc: %d\n", ret);
		return ret;
	}

	rproc_unprepare_subdevices(rproc);

	rproc->state = RPROC_DETACHED;

	dev_info(dev, "detached remote processor %s\n", rproc->name);

	return 0;
}
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
/**
 * rproc_trigger_recovery() - recover a remoteproc
 * @rproc: the remote processor
 *
 * Stop the crashed remote processor, take a coredump and boot it again
 * from a freshly requested firmware image.
 *
 * This function can sleep, so it cannot be called from atomic context.
 *
 * Return: 0 on success, a negative value on failure
 */
int rproc_trigger_recovery(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev = &rproc->dev;
	int ret;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret)
		return ret;

	/* State could have changed before we got the mutex */
	if (rproc->state != RPROC_CRASHED)
		goto unlock_mutex;

	dev_err(dev, "recovering %s\n", rproc->name);

	ret = rproc_stop(rproc, true);
	if (ret)
		goto unlock_mutex;

	/* generate coredump; always non-NULL (see rproc_alloc_ops()) */
	rproc->ops->coredump(rproc);

	/* load firmware */
	ret = request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto unlock_mutex;
	}

	/* boot the remote processor up again */
	ret = rproc_start(rproc, firmware_p);

	release_firmware(firmware_p);

unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
1939
1940
1941
1942
1943
1944
1945
1946
/**
 * rproc_crash_handler_work() - handle a crash
 * @work: work treating the crash
 *
 * Runs from the recovery workqueue (queued by rproc_report_crash()).
 * Transitions the rproc to RPROC_CRASHED and, unless recovery is
 * disabled, triggers the recovery sequence.
 */
static void rproc_crash_handler_work(struct work_struct *work)
{
	struct rproc *rproc = container_of(work, struct rproc, crash_handler);
	struct device *dev = &rproc->dev;

	dev_dbg(dev, "enter %s\n", __func__);

	mutex_lock(&rproc->lock);

	if (rproc->state == RPROC_CRASHED || rproc->state == RPROC_OFFLINE) {
		/* handle only the first crash detected */
		mutex_unlock(&rproc->lock);
		return;
	}

	rproc->state = RPROC_CRASHED;
	dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt,
		rproc->name);

	mutex_unlock(&rproc->lock);

	if (!rproc->recovery_disabled)
		rproc_trigger_recovery(rproc);

	/* Pairs with the pm_stay_awake() in rproc_report_crash() */
	pm_relax(rproc->dev.parent);
}
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
/**
 * rproc_boot() - boot a remote processor
 * @rproc: handle of a remote processor
 *
 * Boot a remote processor (i.e. load its firmware, power it on, ...), or
 * attach to it if it is in the RPROC_DETACHED state.
 *
 * If the remote processor is already powered on (power refcount > 1), this
 * function immediately returns successfully.
 *
 * Return: 0 on success, and an appropriate error value otherwise
 */
int rproc_boot(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev;
	int ret;

	if (!rproc) {
		pr_err("invalid rproc handle\n");
		return -EINVAL;
	}

	dev = &rproc->dev;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	if (rproc->state == RPROC_DELETED) {
		ret = -ENODEV;
		dev_err(dev, "can't boot deleted rproc %s\n", rproc->name);
		goto unlock_mutex;
	}

	/* skip the boot or attach process if rproc is already powered up */
	if (atomic_inc_return(&rproc->power) > 1) {
		ret = 0;
		goto unlock_mutex;
	}

	if (rproc->state == RPROC_DETACHED) {
		dev_info(dev, "attaching to %s\n", rproc->name);

		ret = rproc_attach(rproc);
	} else {
		dev_info(dev, "powering up %s\n", rproc->name);

		/* load firmware */
		ret = request_firmware(&firmware_p, rproc->firmware, dev);
		if (ret < 0) {
			dev_err(dev, "request_firmware failed: %d\n", ret);
			goto downref_rproc;
		}

		ret = rproc_fw_boot(rproc, firmware_p);

		release_firmware(firmware_p);
	}

downref_rproc:
	/* undo the power refcount taken above if boot/attach failed */
	if (ret)
		atomic_dec(&rproc->power);
unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_boot);
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
/**
 * rproc_shutdown() - power off the remote processor
 * @rproc: the remote processor
 *
 * Power off a remote processor (previously booted with rproc_boot()).
 *
 * In case @rproc is still being used by additional users, this function
 * will just decrement the power refcount and exit, without really
 * powering off the device.
 *
 * Note: only the power refcount is decremented here, not the device
 * refcount, so the @rproc handle stays valid after this returns and can
 * be used with a subsequent rproc_boot() if needed.
 *
 * Return: 0 on success, and an appropriate error value otherwise
 */
int rproc_shutdown(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret = 0;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	if (rproc->state != RPROC_RUNNING &&
	    rproc->state != RPROC_ATTACHED) {
		ret = -EINVAL;
		goto out;
	}

	/* if the remote proc is still needed, bail out */
	if (!atomic_dec_and_test(&rproc->power))
		goto out;

	ret = rproc_stop(rproc, false);
	if (ret) {
		/* restore the power refcount so a retry is possible */
		atomic_inc(&rproc->power);
		goto out;
	}

	/* clean up all acquired resources */
	rproc_resource_cleanup(rproc);

	/* release HW resources if needed */
	rproc_unprepare_device(rproc);

	rproc_disable_iommu(rproc);

	/* Free the copy of the resource table */
	kfree(rproc->cached_table);
	rproc->cached_table = NULL;
	rproc->table_ptr = NULL;
out:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_shutdown);
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
/**
 * rproc_detach() - Detach the remote processor from the remoteproc core
 * @rproc: the remote processor
 *
 * Detach a remote processor previously attached to with rproc_attach().
 *
 * In case @rproc is still being used by additional users, this function
 * will just decrement the power refcount and exit, without disconnecting
 * the device.
 *
 * The remote processor keeps running; only the resources held by the core
 * on its behalf are released.
 *
 * Return: 0 on success, and an appropriate error value otherwise
 */
int rproc_detach(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	if (rproc->state != RPROC_ATTACHED) {
		ret = -EINVAL;
		goto out;
	}

	/* if the remote proc is still needed, bail out */
	if (!atomic_dec_and_test(&rproc->power)) {
		ret = 0;
		goto out;
	}

	ret = __rproc_detach(rproc);
	if (ret) {
		/* restore the power refcount so a retry is possible */
		atomic_inc(&rproc->power);
		goto out;
	}

	/* clean up all acquired resources */
	rproc_resource_cleanup(rproc);

	/* release HW resources if needed */
	rproc_unprepare_device(rproc);

	rproc_disable_iommu(rproc);

	/* Free the copy of the resource table */
	kfree(rproc->cached_table);
	rproc->cached_table = NULL;
	rproc->table_ptr = NULL;
out:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_detach);
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
#ifdef CONFIG_OF
/**
 * rproc_get_by_phandle() - find a remote processor by phandle
 * @phandle: phandle to the rproc
 *
 * Finds an rproc handle using the remote processor's phandle, and then
 * returns a handle to the rproc.
 *
 * This function increments the remote processor's refcount, so always
 * use rproc_put() to decrement it back once rproc isn't needed anymore.
 *
 * Return: rproc handle on success, and NULL on failure
 */
struct rproc *rproc_get_by_phandle(phandle phandle)
{
	struct rproc *rproc = NULL, *r;
	struct device_node *np;

	np = of_find_node_by_phandle(phandle);
	if (!np)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(r, &rproc_list, node) {
		if (r->dev.parent && r->dev.parent->of_node == np) {
			/*
			 * Prevent the underlying implementation from being
			 * removed while the handle is held.
			 * NOTE(review): dereferencing r->dev.parent->driver
			 * under RCU assumes the parent driver cannot be
			 * unbound concurrently — verify against driver-core
			 * locking.
			 */
			if (!try_module_get(r->dev.parent->driver->owner)) {
				dev_err(&r->dev, "can't get owner\n");
				break;
			}

			rproc = r;
			get_device(&rproc->dev);
			break;
		}
	}
	rcu_read_unlock();

	of_node_put(np);

	return rproc;
}
#else
struct rproc *rproc_get_by_phandle(phandle phandle)
{
	return NULL;
}
#endif
EXPORT_SYMBOL(rproc_get_by_phandle);
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
/**
 * rproc_set_firmware() - assign a new firmware name
 * @rproc: rproc handle to which the new firmware is being assigned
 * @fw_name: new firmware name to be assigned
 *
 * Configure a custom firmware name different from the default one used at
 * registration time.  This does not trigger a firmware load; the new name
 * is used by the next boot.  Only allowed while the rproc is offline.
 *
 * Return: 0 on success or a negative value upon failure
 */
int rproc_set_firmware(struct rproc *rproc, const char *fw_name)
{
	struct device *dev;
	int ret, len;
	char *p;

	if (!rproc || !fw_name)
		return -EINVAL;

	dev = rproc->dev.parent;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return -EINVAL;
	}

	if (rproc->state != RPROC_OFFLINE) {
		dev_err(dev, "can't change firmware while running\n");
		ret = -EBUSY;
		goto out;
	}

	/* only accept the first line, up to (not including) any newline */
	len = strcspn(fw_name, "\n");
	if (!len) {
		dev_err(dev, "can't provide empty string for firmware name\n");
		ret = -EINVAL;
		goto out;
	}

	p = kstrndup(fw_name, len, GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out;
	}

	/* the old name may have been allocated with kstrdup_const() */
	kfree_const(rproc->firmware);
	rproc->firmware = p;

out:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_set_firmware);
2288
2289 static int rproc_validate(struct rproc *rproc)
2290 {
2291 switch (rproc->state) {
2292 case RPROC_OFFLINE:
2293
2294
2295
2296
2297 if (!rproc->ops->start)
2298 return -EINVAL;
2299 break;
2300 case RPROC_DETACHED:
2301
2302
2303
2304
2305 if (!rproc->ops->attach)
2306 return -EINVAL;
2307
2308
2309
2310
2311
2312 if (rproc->cached_table)
2313 return -EINVAL;
2314 break;
2315 default:
2316
2317
2318
2319
2320 return -EINVAL;
2321 }
2322
2323 return 0;
2324 }
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
/**
 * rproc_add() - register a remote processor
 * @rproc: the remote processor handle to register
 *
 * Registers @rproc with the remoteproc framework, after it has been
 * allocated with rproc_alloc().
 *
 * This is called by the platform-specific rproc implementation, whenever
 * a new remote processor device is probed.  If @rproc->auto_boot is set,
 * the processor is booted (or attached) before being published on the
 * global rproc list.
 *
 * Return: 0 on success and an appropriate error code otherwise
 */
int rproc_add(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = rproc_validate(rproc);
	if (ret < 0)
		return ret;

	/* add char device for this remoteproc */
	ret = rproc_char_device_add(rproc);
	if (ret < 0)
		return ret;

	ret = device_add(dev);
	if (ret < 0) {
		/*
		 * NOTE(review): put_device() drops the reference taken at
		 * device_initialize() time; verify that releasing @rproc
		 * here cannot happen before rproc_char_device_remove()
		 * below runs.
		 */
		put_device(dev);
		goto rproc_remove_cdev;
	}

	dev_info(dev, "%s is available\n", rproc->name);

	/* create debugfs entries */
	rproc_create_debug_dir(rproc);

	/* if rproc is marked always-on, request it to boot */
	if (rproc->auto_boot) {
		ret = rproc_trigger_auto_boot(rproc);
		if (ret < 0)
			goto rproc_remove_dev;
	}

	/* expose to rproc_get_by_phandle users */
	mutex_lock(&rproc_list_mutex);
	list_add_rcu(&rproc->node, &rproc_list);
	mutex_unlock(&rproc_list_mutex);

	return 0;

rproc_remove_dev:
	rproc_delete_debug_dir(rproc);
	device_del(dev);
rproc_remove_cdev:
	rproc_char_device_remove(rproc);
	return ret;
}
EXPORT_SYMBOL(rproc_add);
2393
/* devres action: unregister the rproc when the owning device detaches */
static void devm_rproc_remove(void *rproc)
{
	rproc_del(rproc);
}
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
/**
 * devm_rproc_add() - resource managed rproc_add()
 * @dev: the underlying device
 * @rproc: the remote processor handle to register
 *
 * This function performs like rproc_add() but the registered rproc device
 * will automatically be removed on driver detach.
 *
 * Return: 0 on success, negative errno on failure
 */
int devm_rproc_add(struct device *dev, struct rproc *rproc)
{
	int err;

	err = rproc_add(rproc);
	if (err)
		return err;

	return devm_add_action_or_reset(dev, devm_rproc_remove, rproc);
}
EXPORT_SYMBOL(devm_rproc_add);
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
/*
 * Release callback for the rproc struct device; runs when the last device
 * reference is dropped (e.g. via rproc_free()/rproc_put()) and frees
 * everything allocated by rproc_alloc().
 */
static void rproc_type_release(struct device *dev)
{
	struct rproc *rproc = container_of(dev, struct rproc, dev);

	dev_info(&rproc->dev, "releasing %s\n", rproc->name);

	idr_destroy(&rproc->notifyids);

	/* index is only valid if ida_alloc() succeeded in rproc_alloc() */
	if (rproc->index >= 0)
		ida_free(&rproc_dev_index, rproc->index);

	kfree_const(rproc->firmware);
	kfree_const(rproc->name);
	kfree(rproc->ops);
	kfree(rproc);
}
2446
/* Device type shared by all rproc devices; matched in rproc_get_by_child() */
static const struct device_type rproc_type = {
	.name = "remoteproc",
	.release = rproc_type_release,
};
2451
2452 static int rproc_alloc_firmware(struct rproc *rproc,
2453 const char *name, const char *firmware)
2454 {
2455 const char *p;
2456
2457
2458
2459
2460
2461 if (firmware)
2462 p = kstrdup_const(firmware, GFP_KERNEL);
2463 else
2464 p = kasprintf(GFP_KERNEL, "rproc-%s-fw", name);
2465
2466 if (!p)
2467 return -ENOMEM;
2468
2469 rproc->firmware = p;
2470
2471 return 0;
2472 }
2473
2474 static int rproc_alloc_ops(struct rproc *rproc, const struct rproc_ops *ops)
2475 {
2476 rproc->ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL);
2477 if (!rproc->ops)
2478 return -ENOMEM;
2479
2480
2481 if (!rproc->ops->coredump)
2482 rproc->ops->coredump = rproc_coredump;
2483
2484 if (rproc->ops->load)
2485 return 0;
2486
2487
2488 rproc->ops->load = rproc_elf_load_segments;
2489 rproc->ops->parse_fw = rproc_elf_load_rsc_table;
2490 rproc->ops->find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table;
2491 rproc->ops->sanity_check = rproc_elf_sanity_check;
2492 rproc->ops->get_boot_addr = rproc_elf_get_boot_addr;
2493
2494 return 0;
2495 }
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
/**
 * rproc_alloc() - allocate a remote processor handle
 * @dev: the underlying device
 * @name: name of this remote processor
 * @ops: platform-specific handlers (mainly start/stop)
 * @firmware: name of firmware file to load, can be NULL
 * @len: length of private data needed by the rproc driver (in bytes)
 *
 * Allocates a new remote processor handle, but does not register it yet.
 * If @firmware is NULL a default name is synthesized (see
 * rproc_alloc_firmware()).
 *
 * After creating an rproc handle using this function, and when ready,
 * implementations should then call rproc_add() to complete the
 * registration of the remote processor.
 *
 * Note: _never_ directly deallocate @rproc, even if it was not registered
 * yet.  Instead, when you need to unroll rproc_alloc(), use rproc_free().
 *
 * Return: new rproc pointer on success, and NULL on failure
 */
struct rproc *rproc_alloc(struct device *dev, const char *name,
			  const struct rproc_ops *ops,
			  const char *firmware, int len)
{
	struct rproc *rproc;

	if (!dev || !name || !ops)
		return NULL;

	rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
	if (!rproc)
		return NULL;

	/* driver-private data lives immediately after the rproc struct */
	rproc->priv = &rproc[1];
	rproc->auto_boot = true;
	/* refined later by the firmware loader */
	rproc->elf_class = ELFCLASSNONE;
	rproc->elf_machine = EM_NONE;

	device_initialize(&rproc->dev);
	rproc->dev.parent = dev;
	rproc->dev.type = &rproc_type;
	rproc->dev.class = &rproc_class;
	rproc->dev.driver_data = rproc;
	idr_init(&rproc->notifyids);

	rproc->name = kstrdup_const(name, GFP_KERNEL);
	if (!rproc->name)
		goto put_device;

	if (rproc_alloc_firmware(rproc, name, firmware))
		goto put_device;

	if (rproc_alloc_ops(rproc, ops))
		goto put_device;

	/* Assign a unique device index and name */
	rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
	if (rproc->index < 0) {
		dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
		goto put_device;
	}

	dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);

	atomic_set(&rproc->power, 0);

	mutex_init(&rproc->lock);

	INIT_LIST_HEAD(&rproc->carveouts);
	INIT_LIST_HEAD(&rproc->mappings);
	INIT_LIST_HEAD(&rproc->traces);
	INIT_LIST_HEAD(&rproc->rvdevs);
	INIT_LIST_HEAD(&rproc->subdevs);
	INIT_LIST_HEAD(&rproc->dump_segments);

	INIT_WORK(&rproc->crash_handler, rproc_crash_handler_work);

	rproc->state = RPROC_OFFLINE;

	return rproc;

put_device:
	/* rproc_type_release() frees everything allocated so far */
	put_device(&rproc->dev);
	return NULL;
}
EXPORT_SYMBOL(rproc_alloc);
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
/**
 * rproc_free() - unroll rproc_alloc()
 * @rproc: the remote processor handle
 *
 * This function decrements the rproc dev refcount.
 *
 * If no one holds any reference to rproc anymore, then its refcount would
 * now drop to zero, and it would be freed.
 */
void rproc_free(struct rproc *rproc)
{
	put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_free);
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
/**
 * rproc_put() - release rproc reference
 * @rproc: the remote processor handle
 *
 * Drops the module reference taken in rproc_get_by_phandle() and
 * decrements the rproc dev refcount.
 */
void rproc_put(struct rproc *rproc)
{
	module_put(rproc->dev.parent->driver->owner);
	put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_put);
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
/**
 * rproc_del() - unregister a remote processor
 * @rproc: rproc handle to unregister
 *
 * Should be called when the platform-specific rproc implementation decides
 * to remove the rproc device, and _only_ if a previous rproc_add()
 * completed successfully.
 *
 * After rproc_del() returns, @rproc isn't freed yet, because of the
 * outstanding reference created by rproc_alloc().  To decrement that one
 * last refcount, one still needs to call rproc_free().
 *
 * Return: 0 on success and -EINVAL if @rproc isn't valid
 */
int rproc_del(struct rproc *rproc)
{
	if (!rproc)
		return -EINVAL;

	/* TODO: make sure this works with rproc->power > 1 */
	rproc_shutdown(rproc);

	mutex_lock(&rproc->lock);
	rproc->state = RPROC_DELETED;
	mutex_unlock(&rproc->lock);

	rproc_delete_debug_dir(rproc);

	/* stop new lookups via rproc_get_by_phandle() */
	mutex_lock(&rproc_list_mutex);
	list_del_rcu(&rproc->node);
	mutex_unlock(&rproc_list_mutex);

	/* Ensure that no readers of rproc_list are still active */
	synchronize_rcu();

	device_del(&rproc->dev);
	rproc_char_device_remove(rproc);

	return 0;
}
EXPORT_SYMBOL(rproc_del);
2661
/* devres release: drop the reference taken by devm_rproc_alloc() */
static void devm_rproc_free(struct device *dev, void *res)
{
	rproc_free(*(struct rproc **)res);
}
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680 struct rproc *devm_rproc_alloc(struct device *dev, const char *name,
2681 const struct rproc_ops *ops,
2682 const char *firmware, int len)
2683 {
2684 struct rproc **ptr, *rproc;
2685
2686 ptr = devres_alloc(devm_rproc_free, sizeof(*ptr), GFP_KERNEL);
2687 if (!ptr)
2688 return NULL;
2689
2690 rproc = rproc_alloc(dev, name, ops, firmware, len);
2691 if (rproc) {
2692 *ptr = rproc;
2693 devres_add(dev, ptr);
2694 } else {
2695 devres_free(ptr);
2696 }
2697
2698 return rproc;
2699 }
2700 EXPORT_SYMBOL(devm_rproc_alloc);
2701
2702
2703
2704
2705
2706
2707
2708
/**
 * rproc_add_subdev() - add a subdevice to a remoteproc
 * @rproc: rproc handle to add the subdevice to
 * @subdev: subdev handle to register
 *
 * Caller is responsible for populating optional subdevice function pointers.
 */
void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
{
	list_add_tail(&subdev->node, &rproc->subdevs);
}
EXPORT_SYMBOL(rproc_add_subdev);
2714
2715
2716
2717
2718
2719
/**
 * rproc_remove_subdev() - remove a subdevice from a remoteproc
 * @rproc: rproc handle to remove the subdevice from
 * @subdev: subdev handle, previously registered with rproc_add_subdev()
 */
void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
{
	list_del(&subdev->node);
}
EXPORT_SYMBOL(rproc_remove_subdev);
2725
2726
2727
2728
2729
2730
2731
2732 struct rproc *rproc_get_by_child(struct device *dev)
2733 {
2734 for (dev = dev->parent; dev; dev = dev->parent) {
2735 if (dev->type == &rproc_type)
2736 return dev->driver_data;
2737 }
2738
2739 return NULL;
2740 }
2741 EXPORT_SYMBOL(rproc_get_by_child);
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
/**
 * rproc_report_crash() - rproc crash reporter function
 * @rproc: remote processor
 * @type: crash type
 *
 * This function must be called every time a crash is detected by the
 * platform driver implementing a specific remoteproc.  It can be called
 * from atomic/interrupt context; the actual handling is deferred to the
 * recovery workqueue.
 */
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
{
	if (!rproc) {
		pr_err("NULL rproc pointer\n");
		return;
	}

	/* Prevent suspend while the crash is handled; released via pm_relax()
	 * at the end of rproc_crash_handler_work(). */
	pm_stay_awake(rproc->dev.parent);

	dev_err(&rproc->dev, "crash detected in %s: type %s\n",
		rproc->name, rproc_crash_to_string(type));

	queue_work(rproc_recovery_wq, &rproc->crash_handler);
}
EXPORT_SYMBOL(rproc_report_crash);
2770
/*
 * Panic notifier: give every active remote processor that implements the
 * panic() op a chance to react (e.g. quiesce) before the kernel dies.
 */
static int rproc_panic_handler(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	unsigned int longest = 0;
	struct rproc *rproc;
	unsigned int d;

	rcu_read_lock();
	list_for_each_entry_rcu(rproc, &rproc_list, node) {
		if (!rproc->ops->panic)
			continue;

		/* only processors that are actually up are notified */
		if (rproc->state != RPROC_RUNNING &&
		    rproc->state != RPROC_ATTACHED)
			continue;

		/* each panic() op returns a delay request in milliseconds */
		d = rproc->ops->panic(rproc);
		longest = max(longest, d);
	}
	rcu_read_unlock();

	/*
	 * Delay for the longest requested duration before returning.  This
	 * gives the remote processors time to perform any requested
	 * operations (such as flushing caches) before the system goes down.
	 */
	mdelay(longest);

	return NOTIFY_DONE;
}
2802
/* Hook the rproc panic handler into the kernel panic notifier chain */
static void __init rproc_init_panic(void)
{
	rproc_panic_nb.notifier_call = rproc_panic_handler;
	atomic_notifier_chain_register(&panic_notifier_list, &rproc_panic_nb);
}
2808
/* Undo rproc_init_panic() on module removal */
static void __exit rproc_exit_panic(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list, &rproc_panic_nb);
}
2813
static int __init remoteproc_init(void)
{
	/*
	 * Crash recovery runs from an unbound, freezable workqueue so it is
	 * not pinned to a CPU and does not race with system suspend.
	 */
	rproc_recovery_wq = alloc_workqueue("rproc_recovery_wq",
					   WQ_UNBOUND | WQ_FREEZABLE, 0);
	if (!rproc_recovery_wq) {
		pr_err("remoteproc: creation of rproc_recovery_wq failed\n");
		return -ENOMEM;
	}

	rproc_init_sysfs();
	rproc_init_debugfs();
	rproc_init_cdev();
	rproc_init_panic();

	return 0;
}
subsys_initcall(remoteproc_init);
2831
static void __exit remoteproc_exit(void)
{
	ida_destroy(&rproc_dev_index);

	/* init failed before any of the subsystems were set up */
	if (!rproc_recovery_wq)
		return;

	rproc_exit_panic();
	rproc_exit_debugfs();
	rproc_exit_sysfs();
	destroy_workqueue(rproc_recovery_wq);
}
module_exit(remoteproc_exit);
2845
2846 MODULE_LICENSE("GPL v2");
2847 MODULE_DESCRIPTION("Generic Remote Processor Framework");