// SPDX-License-Identifier: GPL-2.0-only
/*
 * Apple DART (Device Address Resolution Table) IOMMU driver
 *
 * DART is the IOMMU found on Apple silicon SoCs (e.g. the t8103/M1).
 * It translates DMA addresses for up to 16 masters ("streams") per
 * instance using up to four translation table base registers each.
 *
 * NOTE(review): the original file header was lost in extraction; the
 * license above matches MODULE_LICENSE("GPL v2") at the bottom of the
 * file — confirm the exact original header/copyright lines before
 * committing.
 */
0014 #include <linux/atomic.h>
0015 #include <linux/bitfield.h>
0016 #include <linux/clk.h>
0017 #include <linux/dev_printk.h>
0018 #include <linux/dma-iommu.h>
0019 #include <linux/dma-mapping.h>
0020 #include <linux/err.h>
0021 #include <linux/interrupt.h>
0022 #include <linux/io-pgtable.h>
0023 #include <linux/iommu.h>
0024 #include <linux/iopoll.h>
0025 #include <linux/module.h>
0026 #include <linux/of.h>
0027 #include <linux/of_address.h>
0028 #include <linux/of_iommu.h>
0029 #include <linux/of_platform.h>
0030 #include <linux/pci.h>
0031 #include <linux/platform_device.h>
0032 #include <linux/slab.h>
0033 #include <linux/swab.h>
0034 #include <linux/types.h>
0035
#define DART_MAX_STREAMS 16
#define DART_MAX_TTBR 4
#define MAX_DARTS_PER_DEVICE 2

/* Stream bitmask selecting all 16 streams of one DART at once */
#define DART_STREAM_ALL 0xffff

/* Hardware parameter registers (read-only) */
#define DART_PARAMS1 0x00
#define DART_PARAMS_PAGE_SHIFT GENMASK(27, 24)

#define DART_PARAMS2 0x04
#define DART_PARAMS_BYPASS_SUPPORT BIT(0)

/* Command register; target streams are chosen via DART_STREAM_SELECT */
#define DART_STREAM_COMMAND 0x20
#define DART_STREAM_COMMAND_BUSY BIT(2)
#define DART_STREAM_COMMAND_INVALIDATE BIT(20)

#define DART_STREAM_SELECT 0x34

/* Fault status register; acknowledged by writing back the value read */
#define DART_ERROR 0x40
#define DART_ERROR_STREAM GENMASK(27, 24)
#define DART_ERROR_CODE GENMASK(11, 0)
#define DART_ERROR_FLAG BIT(31)

/* Fault codes found inside DART_ERROR_CODE */
#define DART_ERROR_READ_FAULT BIT(4)
#define DART_ERROR_WRITE_FAULT BIT(3)
#define DART_ERROR_NO_PTE BIT(2)
#define DART_ERROR_NO_PMD BIT(1)
#define DART_ERROR_NO_TTBR BIT(0)

/* When DART_CONFIG_LOCK is set the configuration is frozen until reset */
#define DART_CONFIG 0x60
#define DART_CONFIG_LOCK BIT(15)

/* Timeout (us) for DART_STREAM_COMMAND_BUSY to clear after a command */
#define DART_STREAM_COMMAND_BUSY_TIMEOUT 100

#define DART_ERROR_ADDR_HI 0x54
#define DART_ERROR_ADDR_LO 0x50

#define DART_STREAMS_ENABLE 0xfc

/* Per-stream translation configuration register */
#define DART_TCR(sid) (0x100 + 4 * (sid))
#define DART_TCR_TRANSLATE_ENABLE BIT(7)
#define DART_TCR_BYPASS0_ENABLE BIT(8)
#define DART_TCR_BYPASS1_ENABLE BIT(12)

/* Per-stream translation table base registers; paddr is stored >> 12 */
#define DART_TTBR(sid, idx) (0x200 + 16 * (sid) + 4 * (idx))
#define DART_TTBR_VALID BIT(31)
#define DART_TTBR_SHIFT 12
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
/*
 * Per-instance state of a single DART.
 *
 * @dev: device struct of the DART itself
 * @regs: base of the DART MMIO region
 * @irq: translation fault interrupt line
 * @clks: clocks required to access this DART
 * @num_clks: number of entries in @clks
 * @lock: serializes access to the shared stream select/command registers
 * @pgsize: page size used by this DART, probed from DART_PARAMS1
 * @supports_bypass: hardware supports bypassing translation
 * @force_bypass: DART page size exceeds the CPU PAGE_SIZE, so translation
 *                cannot be used and bypass is forced (set in probe)
 * @sid2group: stream id -> iommu_group mapping, guarded by
 *             apple_dart_groups_lock
 * @iommu: core iommu device handle
 */
struct apple_dart {
	struct device *dev;

	void __iomem *regs;

	int irq;
	struct clk_bulk_data *clks;
	int num_clks;

	spinlock_t lock;

	u32 pgsize;
	u32 supports_bypass : 1;
	u32 force_bypass : 1;

	struct iommu_group *sid2group[DART_MAX_STREAMS];
	struct iommu_device iommu;
};
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
/*
 * A set of streams (@sidmap is a bitmask of stream ids) belonging to one
 * DART. The atomic variant is used inside domains, where the sid mask can
 * be modified concurrently by attach/detach without extra locking.
 */
struct apple_dart_stream_map {
	struct apple_dart *dart;
	unsigned long sidmap;
};
struct apple_dart_atomic_stream_map {
	struct apple_dart *dart;
	atomic64_t sidmap;
};
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
/*
 * IOMMU domain backed by one io-pgtable instance.
 *
 * @pgtbl_ops: page table operations; NULL until the domain is finalized
 * @finalized: true once the page table has been allocated (or the domain
 *             type needs none); guarded by @init_lock
 * @init_lock: protects one-time finalization in apple_dart_finalize_domain
 * @stream_maps: streams attached to this domain, one slot per DART
 * @domain: embedded core iommu_domain (see to_dart_domain)
 */
struct apple_dart_domain {
	struct io_pgtable_ops *pgtbl_ops;

	bool finalized;
	struct mutex init_lock;
	struct apple_dart_atomic_stream_map stream_maps[MAX_DARTS_PER_DEVICE];

	struct iommu_domain domain;
};
0160
0161
0162
0163
0164
0165
0166
0167
0168
/*
 * Per-master configuration: the streams (across up to two DARTs) that
 * translate DMA for one device. Built up in apple_dart_of_xlate and
 * stored in the device's iommu private data.
 */
struct apple_dart_master_cfg {
	struct apple_dart_stream_map stream_maps[MAX_DARTS_PER_DEVICE];
};
0172
0173
0174
0175
0176
0177
0178
0179
0180
/*
 * Iterate over the used entries of a stream_maps array embedded in @base
 * (works for both apple_dart_master_cfg and apple_dart_domain); stops at
 * the first slot with a NULL dart pointer.
 */
#define for_each_stream_map(i, base, stream_map) \
	for (i = 0, stream_map = &(base)->stream_maps[0]; \
	     i < MAX_DARTS_PER_DEVICE && stream_map->dart; \
	     stream_map = &(base)->stream_maps[++i])
0185
0186 static struct platform_driver apple_dart_driver;
0187 static const struct iommu_ops apple_dart_iommu_ops;
0188
0189 static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
0190 {
0191 return container_of(dom, struct apple_dart_domain, domain);
0192 }
0193
0194 static void
0195 apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map)
0196 {
0197 int sid;
0198
0199 for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
0200 writel(DART_TCR_TRANSLATE_ENABLE,
0201 stream_map->dart->regs + DART_TCR(sid));
0202 }
0203
0204 static void apple_dart_hw_disable_dma(struct apple_dart_stream_map *stream_map)
0205 {
0206 int sid;
0207
0208 for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
0209 writel(0, stream_map->dart->regs + DART_TCR(sid));
0210 }
0211
0212 static void
0213 apple_dart_hw_enable_bypass(struct apple_dart_stream_map *stream_map)
0214 {
0215 int sid;
0216
0217 WARN_ON(!stream_map->dart->supports_bypass);
0218 for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
0219 writel(DART_TCR_BYPASS0_ENABLE | DART_TCR_BYPASS1_ENABLE,
0220 stream_map->dart->regs + DART_TCR(sid));
0221 }
0222
0223 static void apple_dart_hw_set_ttbr(struct apple_dart_stream_map *stream_map,
0224 u8 idx, phys_addr_t paddr)
0225 {
0226 int sid;
0227
0228 WARN_ON(paddr & ((1 << DART_TTBR_SHIFT) - 1));
0229 for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
0230 writel(DART_TTBR_VALID | (paddr >> DART_TTBR_SHIFT),
0231 stream_map->dart->regs + DART_TTBR(sid, idx));
0232 }
0233
0234 static void apple_dart_hw_clear_ttbr(struct apple_dart_stream_map *stream_map,
0235 u8 idx)
0236 {
0237 int sid;
0238
0239 for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
0240 writel(0, stream_map->dart->regs + DART_TTBR(sid, idx));
0241 }
0242
0243 static void
0244 apple_dart_hw_clear_all_ttbrs(struct apple_dart_stream_map *stream_map)
0245 {
0246 int i;
0247
0248 for (i = 0; i < DART_MAX_TTBR; ++i)
0249 apple_dart_hw_clear_ttbr(stream_map, i);
0250 }
0251
/*
 * Execute @command for all streams in @stream_map and wait for completion.
 *
 * The stream select and command registers are shared per DART, so the
 * select+command sequence must run atomically under dart->lock.
 *
 * Returns 0 on success or the (negative) poll error — -ETIMEDOUT if the
 * busy bit does not clear within DART_STREAM_COMMAND_BUSY_TIMEOUT us.
 */
static int
apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map,
			     u32 command)
{
	unsigned long flags;
	int ret;
	u32 command_reg;

	spin_lock_irqsave(&stream_map->dart->lock, flags);

	/* Select the target streams, then issue the command to all of them. */
	writel(stream_map->sidmap, stream_map->dart->regs + DART_STREAM_SELECT);
	writel(command, stream_map->dart->regs + DART_STREAM_COMMAND);

	ret = readl_poll_timeout_atomic(
		stream_map->dart->regs + DART_STREAM_COMMAND, command_reg,
		!(command_reg & DART_STREAM_COMMAND_BUSY), 1,
		DART_STREAM_COMMAND_BUSY_TIMEOUT);

	spin_unlock_irqrestore(&stream_map->dart->lock, flags);

	if (ret) {
		dev_err(stream_map->dart->dev,
			"busy bit did not clear after command %x for streams %lx\n",
			command, stream_map->sidmap);
		return ret;
	}

	return 0;
}
0281
0282 static int
0283 apple_dart_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map)
0284 {
0285 return apple_dart_hw_stream_command(stream_map,
0286 DART_STREAM_COMMAND_INVALIDATE);
0287 }
0288
/*
 * Bring the DART into a known state: DMA disabled and TTBRs cleared for
 * all streams, all streams enabled, pending faults acknowledged, and the
 * TLB flushed.
 *
 * Fails with -EINVAL if the configuration is locked (cannot be changed
 * until the next reboot); otherwise returns the TLB flush result.
 */
static int apple_dart_hw_reset(struct apple_dart *dart)
{
	u32 config;
	struct apple_dart_stream_map stream_map;

	config = readl(dart->regs + DART_CONFIG);
	if (config & DART_CONFIG_LOCK) {
		dev_err(dart->dev, "DART is locked down until reboot: %08x\n",
			config);
		return -EINVAL;
	}

	stream_map.dart = dart;
	stream_map.sidmap = DART_STREAM_ALL;
	apple_dart_hw_disable_dma(&stream_map);
	apple_dart_hw_clear_all_ttbrs(&stream_map);

	/* enable all streams globally since TCR is used to control isolation */
	writel(DART_STREAM_ALL, dart->regs + DART_STREAMS_ENABLE);

	/* clear any pending errors before the interrupt is unmasked */
	writel(readl(dart->regs + DART_ERROR), dart->regs + DART_ERROR);

	return apple_dart_hw_invalidate_tlb(&stream_map);
}
0314
0315 static void apple_dart_domain_flush_tlb(struct apple_dart_domain *domain)
0316 {
0317 int i;
0318 struct apple_dart_atomic_stream_map *domain_stream_map;
0319 struct apple_dart_stream_map stream_map;
0320
0321 for_each_stream_map(i, domain, domain_stream_map) {
0322 stream_map.dart = domain_stream_map->dart;
0323 stream_map.sidmap = atomic64_read(&domain_stream_map->sidmap);
0324 apple_dart_hw_invalidate_tlb(&stream_map);
0325 }
0326 }
0327
/* iommu_domain_ops.flush_iotlb_all: full TLB flush for the domain. */
static void apple_dart_flush_iotlb_all(struct iommu_domain *domain)
{
	struct apple_dart_domain *dart_domain = to_dart_domain(domain);

	apple_dart_domain_flush_tlb(dart_domain);
}
0332
/* iommu_domain_ops.iotlb_sync: the gather is ignored, flush everything. */
static void apple_dart_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	struct apple_dart_domain *dart_domain = to_dart_domain(domain);

	apple_dart_domain_flush_tlb(dart_domain);
}
0338
0339 static void apple_dart_iotlb_sync_map(struct iommu_domain *domain,
0340 unsigned long iova, size_t size)
0341 {
0342 apple_dart_domain_flush_tlb(to_dart_domain(domain));
0343 }
0344
0345 static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
0346 dma_addr_t iova)
0347 {
0348 struct apple_dart_domain *dart_domain = to_dart_domain(domain);
0349 struct io_pgtable_ops *ops = dart_domain->pgtbl_ops;
0350
0351 if (!ops)
0352 return 0;
0353
0354 return ops->iova_to_phys(ops, iova);
0355 }
0356
0357 static int apple_dart_map_pages(struct iommu_domain *domain, unsigned long iova,
0358 phys_addr_t paddr, size_t pgsize,
0359 size_t pgcount, int prot, gfp_t gfp,
0360 size_t *mapped)
0361 {
0362 struct apple_dart_domain *dart_domain = to_dart_domain(domain);
0363 struct io_pgtable_ops *ops = dart_domain->pgtbl_ops;
0364
0365 if (!ops)
0366 return -ENODEV;
0367
0368 return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp,
0369 mapped);
0370 }
0371
0372 static size_t apple_dart_unmap_pages(struct iommu_domain *domain,
0373 unsigned long iova, size_t pgsize,
0374 size_t pgcount,
0375 struct iommu_iotlb_gather *gather)
0376 {
0377 struct apple_dart_domain *dart_domain = to_dart_domain(domain);
0378 struct io_pgtable_ops *ops = dart_domain->pgtbl_ops;
0379
0380 return ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
0381 }
0382
/*
 * Point the streams in @stream_map at @domain's page tables and enable
 * translation. Unused TTBR slots are cleared and the TLB is flushed so
 * no stale entries survive from a previous configuration.
 */
static void
apple_dart_setup_translation(struct apple_dart_domain *domain,
			     struct apple_dart_stream_map *stream_map)
{
	int i;
	struct io_pgtable_cfg *pgtbl_cfg =
		&io_pgtable_ops_to_pgtable(domain->pgtbl_ops)->cfg;

	for (i = 0; i < pgtbl_cfg->apple_dart_cfg.n_ttbrs; ++i)
		apple_dart_hw_set_ttbr(stream_map, i,
				       pgtbl_cfg->apple_dart_cfg.ttbr[i]);
	for (; i < DART_MAX_TTBR; ++i)
		apple_dart_hw_clear_ttbr(stream_map, i);

	apple_dart_hw_enable_translation(stream_map);
	apple_dart_hw_invalidate_tlb(stream_map);
}
0400
/*
 * One-time setup of a translating domain: copy the attaching master's
 * stream maps into the domain and allocate the APPLE_DART io-pgtable.
 *
 * Serialized by dart_domain->init_lock; subsequent callers see
 * @finalized and return immediately, so a domain is bound to the page
 * size and geometry of the first DART attached to it.
 *
 * Returns 0 on success (or if already finalized), -ENOMEM if the page
 * table cannot be allocated.
 */
static int apple_dart_finalize_domain(struct iommu_domain *domain,
				      struct apple_dart_master_cfg *cfg)
{
	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
	struct apple_dart *dart = cfg->stream_maps[0].dart;
	struct io_pgtable_cfg pgtbl_cfg;
	int ret = 0;
	int i;

	mutex_lock(&dart_domain->init_lock);

	if (dart_domain->finalized)
		goto done;

	/* Adopt the master's streams as the domain's initial stream maps. */
	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
		dart_domain->stream_maps[i].dart = cfg->stream_maps[i].dart;
		atomic64_set(&dart_domain->stream_maps[i].sidmap,
			     cfg->stream_maps[i].sidmap);
	}

	pgtbl_cfg = (struct io_pgtable_cfg){
		.pgsize_bitmap = dart->pgsize,
		.ias = 32,
		.oas = 36,
		.coherent_walk = 1,
		.iommu_dev = dart->dev,
	};

	dart_domain->pgtbl_ops =
		alloc_io_pgtable_ops(APPLE_DART, &pgtbl_cfg, domain);
	if (!dart_domain->pgtbl_ops) {
		ret = -ENOMEM;
		goto done;
	}

	/* DART input addresses are 32 bit wide (ias above). */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = DMA_BIT_MASK(32);
	domain->geometry.force_aperture = true;

	dart_domain->finalized = true;

done:
	mutex_unlock(&dart_domain->init_lock);
	return ret;
}
0447
0448 static int
0449 apple_dart_mod_streams(struct apple_dart_atomic_stream_map *domain_maps,
0450 struct apple_dart_stream_map *master_maps,
0451 bool add_streams)
0452 {
0453 int i;
0454
0455 for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
0456 if (domain_maps[i].dart != master_maps[i].dart)
0457 return -EINVAL;
0458 }
0459
0460 for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
0461 if (!domain_maps[i].dart)
0462 break;
0463 if (add_streams)
0464 atomic64_or(master_maps[i].sidmap,
0465 &domain_maps[i].sidmap);
0466 else
0467 atomic64_and(~master_maps[i].sidmap,
0468 &domain_maps[i].sidmap);
0469 }
0470
0471 return 0;
0472 }
0473
0474 static int apple_dart_domain_add_streams(struct apple_dart_domain *domain,
0475 struct apple_dart_master_cfg *cfg)
0476 {
0477 return apple_dart_mod_streams(domain->stream_maps, cfg->stream_maps,
0478 true);
0479 }
0480
0481 static int apple_dart_domain_remove_streams(struct apple_dart_domain *domain,
0482 struct apple_dart_master_cfg *cfg)
0483 {
0484 return apple_dart_mod_streams(domain->stream_maps, cfg->stream_maps,
0485 false);
0486 }
0487
/*
 * Attach @dev to @domain.
 *
 * Rejects combinations the hardware cannot honor: a translating domain
 * on a DART whose page size forces bypass, or an identity domain when
 * bypass is unsupported. For translating domains the master's streams
 * are added to the domain and pointed at its page tables; blocked and
 * identity domains only reprogram the streams' TCRs.
 */
static int apple_dart_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret, i;
	struct apple_dart_stream_map *stream_map;
	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct apple_dart_domain *dart_domain = to_dart_domain(domain);

	if (cfg->stream_maps[0].dart->force_bypass &&
	    domain->type != IOMMU_DOMAIN_IDENTITY)
		return -EINVAL;
	if (!cfg->stream_maps[0].dart->supports_bypass &&
	    domain->type == IOMMU_DOMAIN_IDENTITY)
		return -EINVAL;

	ret = apple_dart_finalize_domain(domain, cfg);
	if (ret)
		return ret;

	switch (domain->type) {
	case IOMMU_DOMAIN_DMA:
	case IOMMU_DOMAIN_UNMANAGED:
		ret = apple_dart_domain_add_streams(dart_domain, cfg);
		if (ret)
			return ret;

		for_each_stream_map(i, cfg, stream_map)
			apple_dart_setup_translation(dart_domain, stream_map);
		break;
	case IOMMU_DOMAIN_BLOCKED:
		for_each_stream_map(i, cfg, stream_map)
			apple_dart_hw_disable_dma(stream_map);
		break;
	case IOMMU_DOMAIN_IDENTITY:
		for_each_stream_map(i, cfg, stream_map)
			apple_dart_hw_enable_bypass(stream_map);
		break;
	}

	return ret;
}
0529
/*
 * Detach @dev from @domain: disable DMA on the master's streams first,
 * then (for translating domains) remove them from the domain's stream
 * maps so future TLB flushes skip them.
 */
static void apple_dart_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	int i;
	struct apple_dart_stream_map *stream_map;
	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct apple_dart_domain *dart_domain = to_dart_domain(domain);

	for_each_stream_map(i, cfg, stream_map)
		apple_dart_hw_disable_dma(stream_map);

	if (domain->type == IOMMU_DOMAIN_DMA ||
	    domain->type == IOMMU_DOMAIN_UNMANAGED)
		apple_dart_domain_remove_streams(dart_domain, cfg);
}
0545
0546 static struct iommu_device *apple_dart_probe_device(struct device *dev)
0547 {
0548 struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
0549 struct apple_dart_stream_map *stream_map;
0550 int i;
0551
0552 if (!cfg)
0553 return ERR_PTR(-ENODEV);
0554
0555 for_each_stream_map(i, cfg, stream_map)
0556 device_link_add(
0557 dev, stream_map->dart->dev,
0558 DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
0559
0560 return &cfg->stream_maps[0].dart->iommu;
0561 }
0562
0563 static void apple_dart_release_device(struct device *dev)
0564 {
0565 struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
0566
0567 dev_iommu_priv_set(dev, NULL);
0568 kfree(cfg);
0569 }
0570
0571 static struct iommu_domain *apple_dart_domain_alloc(unsigned int type)
0572 {
0573 struct apple_dart_domain *dart_domain;
0574
0575 if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED &&
0576 type != IOMMU_DOMAIN_IDENTITY && type != IOMMU_DOMAIN_BLOCKED)
0577 return NULL;
0578
0579 dart_domain = kzalloc(sizeof(*dart_domain), GFP_KERNEL);
0580 if (!dart_domain)
0581 return NULL;
0582
0583 mutex_init(&dart_domain->init_lock);
0584
0585
0586 if (type == IOMMU_DOMAIN_IDENTITY || type == IOMMU_DOMAIN_BLOCKED)
0587 dart_domain->finalized = true;
0588
0589 return &dart_domain->domain;
0590 }
0591
0592 static void apple_dart_domain_free(struct iommu_domain *domain)
0593 {
0594 struct apple_dart_domain *dart_domain = to_dart_domain(domain);
0595
0596 if (dart_domain->pgtbl_ops)
0597 free_io_pgtable_ops(dart_domain->pgtbl_ops);
0598
0599 kfree(dart_domain);
0600 }
0601
0602 static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
0603 {
0604 struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
0605 struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
0606 struct apple_dart *dart = platform_get_drvdata(iommu_pdev);
0607 struct apple_dart *cfg_dart;
0608 int i, sid;
0609
0610 if (args->args_count != 1)
0611 return -EINVAL;
0612 sid = args->args[0];
0613
0614 if (!cfg)
0615 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
0616 if (!cfg)
0617 return -ENOMEM;
0618 dev_iommu_priv_set(dev, cfg);
0619
0620 cfg_dart = cfg->stream_maps[0].dart;
0621 if (cfg_dart) {
0622 if (cfg_dart->supports_bypass != dart->supports_bypass)
0623 return -EINVAL;
0624 if (cfg_dart->force_bypass != dart->force_bypass)
0625 return -EINVAL;
0626 if (cfg_dart->pgsize != dart->pgsize)
0627 return -EINVAL;
0628 }
0629
0630 for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
0631 if (cfg->stream_maps[i].dart == dart) {
0632 cfg->stream_maps[i].sidmap |= 1 << sid;
0633 return 0;
0634 }
0635 }
0636 for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
0637 if (!cfg->stream_maps[i].dart) {
0638 cfg->stream_maps[i].dart = dart;
0639 cfg->stream_maps[i].sidmap = 1 << sid;
0640 return 0;
0641 }
0642 }
0643
0644 return -EINVAL;
0645 }
0646
0647 static DEFINE_MUTEX(apple_dart_groups_lock);
0648
0649 static void apple_dart_release_group(void *iommu_data)
0650 {
0651 int i, sid;
0652 struct apple_dart_stream_map *stream_map;
0653 struct apple_dart_master_cfg *group_master_cfg = iommu_data;
0654
0655 mutex_lock(&apple_dart_groups_lock);
0656
0657 for_each_stream_map(i, group_master_cfg, stream_map)
0658 for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
0659 stream_map->dart->sid2group[sid] = NULL;
0660
0661 kfree(iommu_data);
0662 mutex_unlock(&apple_dart_groups_lock);
0663 }
0664
/*
 * iommu_ops.device_group: masters sharing a stream must share a group.
 *
 * If any of @dev's streams is already assigned to a group, all of them
 * must point to the same one and that group is reused. Otherwise a new
 * group is created, @dev's cfg is duplicated as the group's iommudata
 * (freed by apple_dart_release_group), and every stream is recorded in
 * sid2group. All sid2group access is under apple_dart_groups_lock.
 */
static struct iommu_group *apple_dart_device_group(struct device *dev)
{
	int i, sid;
	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct apple_dart_stream_map *stream_map;
	struct apple_dart_master_cfg *group_master_cfg;
	struct iommu_group *group = NULL;
	struct iommu_group *res = ERR_PTR(-EINVAL);

	mutex_lock(&apple_dart_groups_lock);

	for_each_stream_map(i, cfg, stream_map) {
		for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) {
			struct iommu_group *stream_group =
				stream_map->dart->sid2group[sid];

			/* Conflicting pre-existing groups: cannot isolate. */
			if (group && group != stream_group) {
				res = ERR_PTR(-EINVAL);
				goto out;
			}

			group = stream_group;
		}
	}

	if (group) {
		res = iommu_group_ref_get(group);
		goto out;
	}

#ifdef CONFIG_PCI
	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
#endif
		group = generic_device_group(dev);

	res = ERR_PTR(-ENOMEM);
	if (!group)
		goto out;

	group_master_cfg = kmemdup(cfg, sizeof(*group_master_cfg), GFP_KERNEL);
	if (!group_master_cfg) {
		iommu_group_put(group);
		goto out;
	}

	iommu_group_set_iommudata(group, group_master_cfg,
				  apple_dart_release_group);

	for_each_stream_map(i, cfg, stream_map)
		for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
			stream_map->dart->sid2group[sid] = group;

	res = group;

out:
	mutex_unlock(&apple_dart_groups_lock);
	return res;
}
0725
0726 static int apple_dart_def_domain_type(struct device *dev)
0727 {
0728 struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
0729
0730 if (cfg->stream_maps[0].dart->force_bypass)
0731 return IOMMU_DOMAIN_IDENTITY;
0732 if (!cfg->stream_maps[0].dart->supports_bypass)
0733 return IOMMU_DOMAIN_DMA;
0734
0735 return 0;
0736 }
0737
0738 #ifndef CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
0739
0740 #define CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR 0
0741 #endif
0742 #define DOORBELL_ADDR (CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR & PAGE_MASK)
0743
0744 static void apple_dart_get_resv_regions(struct device *dev,
0745 struct list_head *head)
0746 {
0747 if (IS_ENABLED(CONFIG_PCIE_APPLE) && dev_is_pci(dev)) {
0748 struct iommu_resv_region *region;
0749 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
0750
0751 region = iommu_alloc_resv_region(DOORBELL_ADDR,
0752 PAGE_SIZE, prot,
0753 IOMMU_RESV_MSI);
0754 if (!region)
0755 return;
0756
0757 list_add_tail(®ion->list, head);
0758 }
0759
0760 iommu_dma_get_resv_regions(dev, head);
0761 }
0762
/* IOMMU callbacks exposed to the core for all DART instances. */
static const struct iommu_ops apple_dart_iommu_ops = {
	.domain_alloc = apple_dart_domain_alloc,
	.probe_device = apple_dart_probe_device,
	.release_device = apple_dart_release_device,
	.device_group = apple_dart_device_group,
	.of_xlate = apple_dart_of_xlate,
	.def_domain_type = apple_dart_def_domain_type,
	.get_resv_regions = apple_dart_get_resv_regions,
	/* Replaced with the probed DART page size on first finalize. */
	.pgsize_bitmap = -1UL,
	.owner = THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = apple_dart_attach_dev,
		.detach_dev = apple_dart_detach_dev,
		.map_pages = apple_dart_map_pages,
		.unmap_pages = apple_dart_unmap_pages,
		.flush_iotlb_all = apple_dart_flush_iotlb_all,
		.iotlb_sync = apple_dart_iotlb_sync,
		.iotlb_sync_map = apple_dart_iotlb_sync_map,
		.iova_to_phys = apple_dart_iova_to_phys,
		.free = apple_dart_domain_free,
	}
};
0785
/*
 * Translation fault interrupt handler (IRQF_SHARED): decode and log the
 * fault, then acknowledge it by writing the status back to DART_ERROR.
 * Returns IRQ_NONE when the error flag is not set (fault belongs to a
 * sharer of the line).
 */
static irqreturn_t apple_dart_irq(int irq, void *dev)
{
	struct apple_dart *dart = dev;
	const char *fault_name = NULL;
	u32 error = readl(dart->regs + DART_ERROR);
	u32 error_code = FIELD_GET(DART_ERROR_CODE, error);
	u32 addr_lo = readl(dart->regs + DART_ERROR_ADDR_LO);
	u32 addr_hi = readl(dart->regs + DART_ERROR_ADDR_HI);
	u64 addr = addr_lo | (((u64)addr_hi) << 32);
	u8 stream_idx = FIELD_GET(DART_ERROR_STREAM, error);

	if (!(error & DART_ERROR_FLAG))
		return IRQ_NONE;

	/* Exact match: a single fault code is reported at a time. */
	if (error_code == DART_ERROR_READ_FAULT)
		fault_name = "READ FAULT";
	else if (error_code == DART_ERROR_WRITE_FAULT)
		fault_name = "WRITE FAULT";
	else if (error_code == DART_ERROR_NO_PTE)
		fault_name = "NO PTE FOR IOVA";
	else if (error_code == DART_ERROR_NO_PMD)
		fault_name = "NO PMD FOR IOVA";
	else if (error_code == DART_ERROR_NO_TTBR)
		fault_name = "NO TTBR FOR IOVA";
	else
		fault_name = "unknown";

	dev_err_ratelimited(
		dart->dev,
		"translation fault: status:0x%x stream:%d code:0x%x (%s) at 0x%llx",
		error, stream_idx, error_code, fault_name, addr);

	writel(error, dart->regs + DART_ERROR);
	return IRQ_HANDLED;
}
0822
/*
 * Install (or, with ops == NULL, remove) the DART iommu_ops on the
 * platform and PCI buses. Each bus is only claimed if no other IOMMU
 * driver already owns it; a failure on the PCI bus rolls back the
 * platform bus registration.
 */
static int apple_dart_set_bus_ops(const struct iommu_ops *ops)
{
	int ret;

	if (!iommu_present(&platform_bus_type)) {
		ret = bus_set_iommu(&platform_bus_type, ops);
		if (ret)
			return ret;
	}
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		ret = bus_set_iommu(&pci_bus_type, ops);
		if (ret) {
			bus_set_iommu(&platform_bus_type, NULL);
			return ret;
		}
	}
#endif
	return 0;
}
0843
0844 static int apple_dart_probe(struct platform_device *pdev)
0845 {
0846 int ret;
0847 u32 dart_params[2];
0848 struct resource *res;
0849 struct apple_dart *dart;
0850 struct device *dev = &pdev->dev;
0851
0852 dart = devm_kzalloc(dev, sizeof(*dart), GFP_KERNEL);
0853 if (!dart)
0854 return -ENOMEM;
0855
0856 dart->dev = dev;
0857 spin_lock_init(&dart->lock);
0858
0859 dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
0860 if (IS_ERR(dart->regs))
0861 return PTR_ERR(dart->regs);
0862
0863 if (resource_size(res) < 0x4000) {
0864 dev_err(dev, "MMIO region too small (%pr)\n", res);
0865 return -EINVAL;
0866 }
0867
0868 dart->irq = platform_get_irq(pdev, 0);
0869 if (dart->irq < 0)
0870 return -ENODEV;
0871
0872 ret = devm_clk_bulk_get_all(dev, &dart->clks);
0873 if (ret < 0)
0874 return ret;
0875 dart->num_clks = ret;
0876
0877 ret = clk_bulk_prepare_enable(dart->num_clks, dart->clks);
0878 if (ret)
0879 return ret;
0880
0881 ret = apple_dart_hw_reset(dart);
0882 if (ret)
0883 goto err_clk_disable;
0884
0885 dart_params[0] = readl(dart->regs + DART_PARAMS1);
0886 dart_params[1] = readl(dart->regs + DART_PARAMS2);
0887 dart->pgsize = 1 << FIELD_GET(DART_PARAMS_PAGE_SHIFT, dart_params[0]);
0888 dart->supports_bypass = dart_params[1] & DART_PARAMS_BYPASS_SUPPORT;
0889 dart->force_bypass = dart->pgsize > PAGE_SIZE;
0890
0891 ret = request_irq(dart->irq, apple_dart_irq, IRQF_SHARED,
0892 "apple-dart fault handler", dart);
0893 if (ret)
0894 goto err_clk_disable;
0895
0896 platform_set_drvdata(pdev, dart);
0897
0898 ret = apple_dart_set_bus_ops(&apple_dart_iommu_ops);
0899 if (ret)
0900 goto err_free_irq;
0901
0902 ret = iommu_device_sysfs_add(&dart->iommu, dev, NULL, "apple-dart.%s",
0903 dev_name(&pdev->dev));
0904 if (ret)
0905 goto err_remove_bus_ops;
0906
0907 ret = iommu_device_register(&dart->iommu, &apple_dart_iommu_ops, dev);
0908 if (ret)
0909 goto err_sysfs_remove;
0910
0911 dev_info(
0912 &pdev->dev,
0913 "DART [pagesize %x, bypass support: %d, bypass forced: %d] initialized\n",
0914 dart->pgsize, dart->supports_bypass, dart->force_bypass);
0915 return 0;
0916
0917 err_sysfs_remove:
0918 iommu_device_sysfs_remove(&dart->iommu);
0919 err_remove_bus_ops:
0920 apple_dart_set_bus_ops(NULL);
0921 err_free_irq:
0922 free_irq(dart->irq, dart);
0923 err_clk_disable:
0924 clk_bulk_disable_unprepare(dart->num_clks, dart->clks);
0925
0926 return ret;
0927 }
0928
/*
 * Tear down a DART instance: quiesce the hardware, release the fault
 * IRQ and bus ops, unregister from the IOMMU core and sysfs, and
 * disable the clocks (reverse of probe).
 */
static int apple_dart_remove(struct platform_device *pdev)
{
	struct apple_dart *dart = platform_get_drvdata(pdev);

	apple_dart_hw_reset(dart);
	free_irq(dart->irq, dart);
	apple_dart_set_bus_ops(NULL);

	iommu_device_unregister(&dart->iommu);
	iommu_device_sysfs_remove(&dart->iommu);

	clk_bulk_disable_unprepare(dart->num_clks, dart->clks);

	return 0;
}
0944
/* Devicetree match table. */
static const struct of_device_id apple_dart_of_match[] = {
	{ .compatible = "apple,t8103-dart", .data = NULL },
	{},
};
MODULE_DEVICE_TABLE(of, apple_dart_of_match);
0950
static struct platform_driver apple_dart_driver = {
	.driver	= {
		.name			= "apple-dart",
		.of_match_table		= apple_dart_of_match,
		/* Masters depend on this IOMMU: forbid manual sysfs unbind. */
		.suppress_bind_attrs    = true,
	},
	.probe	= apple_dart_probe,
	.remove	= apple_dart_remove,
};
0960
0961 module_platform_driver(apple_dart_driver);
0962
0963 MODULE_DESCRIPTION("IOMMU API for Apple's DART");
0964 MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
0965 MODULE_LICENSE("GPL v2");