// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

0008 #include <linux/clk.h>
0009 #include <linux/delay.h>
0010 #include <linux/dma-mapping.h>
0011 #include <linux/io.h>
0012 #include <linux/list.h>
0013 #include <linux/module.h>
0014 #include <linux/of_device.h>
0015 #include <linux/of.h>
0016 #include <linux/pm_runtime.h>
0017 #include <linux/slab.h>
0018
0019 #include <soc/tegra/common.h>
0020
0021 #define CREATE_TRACE_POINTS
0022 #include <trace/events/host1x.h>
0023 #undef CREATE_TRACE_POINTS
0024
0025 #if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
0026 #include <asm/dma-iommu.h>
0027 #endif
0028
0029 #include "bus.h"
0030 #include "channel.h"
0031 #include "context.h"
0032 #include "debug.h"
0033 #include "dev.h"
0034 #include "intr.h"
0035
0036 #include "hw/host1x01.h"
0037 #include "hw/host1x02.h"
0038 #include "hw/host1x04.h"
0039 #include "hw/host1x05.h"
0040 #include "hw/host1x06.h"
0041 #include "hw/host1x07.h"
0042 #include "hw/host1x08.h"
0043
0044 void host1x_common_writel(struct host1x *host1x, u32 v, u32 r)
0045 {
0046 writel(v, host1x->common_regs + r);
0047 }
0048
0049 void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
0050 {
0051 writel(v, host1x->hv_regs + r);
0052 }
0053
0054 u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
0055 {
0056 return readl(host1x->hv_regs + r);
0057 }
0058
0059 void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
0060 {
0061 void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
0062
0063 writel(v, sync_regs + r);
0064 }
0065
0066 u32 host1x_sync_readl(struct host1x *host1x, u32 r)
0067 {
0068 void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
0069
0070 return readl(sync_regs + r);
0071 }
0072
0073 void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
0074 {
0075 writel(v, ch->regs + r);
0076 }
0077
0078 u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
0079 {
0080 return readl(ch->regs + r);
0081 }
0082
/* Tegra20 and Tegra30 (both compatibles map here, see host1x_of_match). */
static const struct host1x_info host1x01_info = {
	.nb_channels = 8,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 8,
	.init = host1x01_init,
	.sync_offset = 0x3000, /* sync aperture offset within the register space */
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};
0097
/* Tegra114 (see host1x_of_match). */
static const struct host1x_info host1x02_info = {
	.nb_channels = 9,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 12,
	.init = host1x02_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};
0112
/* Tegra124 (see host1x_of_match). */
static const struct host1x_info host1x04_info = {
	.nb_channels = 12,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x04_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};
0127
/* Tegra210 (see host1x_of_match). */
static const struct host1x_info host1x05_info = {
	.nb_channels = 14,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x05_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};
0142
/*
 * Tegra186 stream ID table: host1x_setup_virtualization_tables() writes
 * entry->offset at entry->base and entry->limit at entry->base + 4.
 */
static const struct host1x_sid_entry tegra186_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC */
		.base = 0x1b00,
		.offset = 0x30,
		.limit = 0x34
	},
};
0157
/* Tegra186 (see host1x_of_match). */
static const struct host1x_info host1x06_info = {
	.nb_channels = 63,
	.nb_pts = 576,
	.nb_mlocks = 24,
	.nb_bases = 16,
	.init = host1x06_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
	.sid_table = tegra186_sid_table,
	.reserve_vblank_syncpts = false,
};
0172
/* Tegra194 stream ID table; same layout as tegra186_sid_table. */
static const struct host1x_sid_entry tegra194_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC */
		.base = 0x1b00,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC1 */
		.base = 0x1bc0,
		.offset = 0x30,
		.limit = 0x34
	},
};
0193
/* Tegra194 (see host1x_of_match). */
static const struct host1x_info host1x07_info = {
	.nb_channels = 63,
	.nb_pts = 704,
	.nb_mlocks = 32,
	.nb_bases = 0,
	.init = host1x07_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
	.sid_table = tegra194_sid_table,
	.reserve_vblank_syncpts = false,
};
0208
0209
0210
0211
0212
0213
0214
/* Tegra234 stream ID table; same layout as tegra186_sid_table. */
static const struct host1x_sid_entry tegra234_sid_table[] = {
	{
		/* VIC channel */
		.base = 0x17b8,
		.offset = 0x30,
		.limit = 0x30
	},
	{
		/* VIC MMIO */
		.base = 0x1688,
		.offset = 0x34,
		.limit = 0x34
	},
};
0229
/* Tegra234 (see host1x_of_match). */
static const struct host1x_info host1x08_info = {
	.nb_channels = 63,
	.nb_pts = 1024,
	.nb_mlocks = 24,
	.nb_bases = 0,
	.init = host1x08_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.has_common = true, /* only Tegra234 has the extra "common" aperture */
	.num_sid_entries = ARRAY_SIZE(tegra234_sid_table),
	.sid_table = tegra234_sid_table,
	/* per-VM tables programmed by host1x_setup_virtualization_tables() */
	.streamid_vm_table = { 0x1004, 128 },
	.classid_vm_table = { 0x1404, 25 },
	.mmio_vm_table = { 0x1504, 25 },
	.reserve_vblank_syncpts = false,
};
0248
/* Device tree match table, one entry per supported Tegra generation. */
static const struct of_device_id host1x_of_match[] = {
	{ .compatible = "nvidia,tegra234-host1x", .data = &host1x08_info, },
	{ .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);
0261
/*
 * Program the hypervisor-owned virtualization tables: the per-engine
 * stream ID registers plus the per-VM stream ID, class ID and MMIO
 * access tables. No-op on SoCs without a hypervisor aperture. Called
 * from runtime resume since register state is lost across reset.
 */
static void host1x_setup_virtualization_tables(struct host1x *host)
{
	const struct host1x_info *info = host->info;
	unsigned int i;

	if (!info->has_hypervisor)
		return;

	/* each entry describes an offset/limit register pair at entry->base */
	for (i = 0; i < info->num_sid_entries; i++) {
		const struct host1x_sid_entry *entry = &info->sid_table[i];

		host1x_hypervisor_writel(host, entry->offset, entry->base);
		host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
	}

	for (i = 0; i < info->streamid_vm_table.count; i++) {
		/* Allow access to all stream IDs to all VMs. */
		host1x_hypervisor_writel(host, 0xff, info->streamid_vm_table.base + 4 * i);
	}

	for (i = 0; i < info->classid_vm_table.count; i++) {
		/* Allow access to all classes to all VMs. */
		host1x_hypervisor_writel(host, 0xff, info->classid_vm_table.base + 4 * i);
	}

	for (i = 0; i < info->mmio_vm_table.count; i++) {
		/* Use VM1 (that's us) as originator VMID for engine MMIO accesses. */
		host1x_hypervisor_writel(host, 0x1, info->mmio_vm_table.base + 4 * i);
	}
}
0292
/*
 * Decide whether host1x should attach to an IOMMU on its own.
 *
 * If at most 32 bits of physical memory can be addressed (Tegra114 and
 * earlier, per the info tables above) and the host1x job firewall is
 * enabled, the firewall already validates command streams and every
 * buffer is addressable, so an IOMMU adds nothing — return false.
 *
 * Otherwise default to using the IOMMU. NOTE(review): the upstream
 * rationale is that Tegra124/Tegra210 GATHER opcodes can only address
 * 32 bits even though the SoC supports 34-bit physical addresses, so
 * mapping buffers into a 32-bit IOVA space lets all of system memory
 * be used — confirm against the upstream commit history.
 */
static bool host1x_wants_iommu(struct host1x *host1x)
{
	if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			return false;
	}

	return true;
}
0327
/*
 * Attach host1x to an IOMMU domain of its own, if wanted and not already
 * handled by the DMA API.
 *
 * Returns the domain in use (possibly NULL when no IOMMU is used) or an
 * ERR_PTR() on failure. On success with an explicit attachment,
 * host->group, host->domain and host->iova are initialized.
 */
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
	int err;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	/*
	 * 32-bit ARM: the DMA API may have auto-attached the device to an
	 * IOMMU mapping; tear that down so we can manage the domain
	 * ourselves below, then re-query the effective domain.
	 */
	if (host->dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping =
			to_dma_iommu_mapping(host->dev);
		arm_iommu_detach_device(host->dev);
		arm_iommu_release_mapping(mapping);

		domain = iommu_get_domain_for_dev(host->dev);
	}
#endif

	/*
	 * We may not always want to enable IOMMU support (for example if the
	 * host1x firewall is already enabled and we don't support addressing
	 * more than 32 bits of physical memory), so check for that first.
	 *
	 * Similarly, if host1x is already attached to an IOMMU (via the DMA
	 * API), don't try to attach again.
	 */
	if (!host1x_wants_iommu(host) || domain)
		return domain;

	host->group = iommu_group_get(host->dev);
	if (host->group) {
		struct iommu_domain_geometry *geometry;
		dma_addr_t start, end;
		unsigned long order;

		err = iova_cache_get();
		if (err < 0)
			goto put_group;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain) {
			err = -ENOMEM;
			goto put_cache;
		}

		err = iommu_attach_group(host->domain, host->group);
		if (err) {
			/* -ENODEV means no IOMMU available: not an error */
			if (err == -ENODEV)
				err = 0;

			goto free_domain;
		}

		/* clamp the usable IOVA range to what the hardware can address */
		geometry = &host->domain->geometry;
		start = geometry->aperture_start & host->info->dma_mask;
		end = geometry->aperture_end & host->info->dma_mask;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order, start >> order);
		host->iova_end = end;

		domain = host->domain;
	}

	return domain;

free_domain:
	iommu_domain_free(host->domain);
	host->domain = NULL;
put_cache:
	iova_cache_put();
put_group:
	iommu_group_put(host->group);
	host->group = NULL;

	return ERR_PTR(err);
}
0403
/*
 * Set up IOMMU attachment (if wanted) and apply the device's DMA mask.
 * Returns 0 on success or a negative error code.
 */
static int host1x_iommu_init(struct host1x *host)
{
	u64 mask = host->info->dma_mask;
	struct iommu_domain *domain;
	int err;

	domain = host1x_iommu_attach(host);
	if (IS_ERR(domain)) {
		err = PTR_ERR(domain);
		dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
		return err;
	}

	/*
	 * Without an IOMMU, and without wide (64-bit) GATHER opcode support,
	 * restrict the DMA mask to 32 bits so that buffers end up in memory
	 * the command DMA engine can actually address.
	 */
	if (!domain && !host->info->has_wide_gather)
		mask = DMA_BIT_MASK(32);

	err = dma_coerce_mask_and_coherent(host->dev, mask);
	if (err < 0) {
		dev_err(host->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	return 0;
}
0436
/*
 * Undo host1x_iommu_attach(): release the IOVA domain, detach and free
 * the IOMMU domain and drop the group/cache references, in reverse
 * order of acquisition. No-op if host1x never attached its own domain.
 */
static void host1x_iommu_exit(struct host1x *host)
{
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);

		iommu_domain_free(host->domain);
		host->domain = NULL;

		iova_cache_put();

		iommu_group_put(host->group);
		host->group = NULL;
	}
}
0452
0453 static int host1x_get_resets(struct host1x *host)
0454 {
0455 int err;
0456
0457 host->resets[0].id = "mc";
0458 host->resets[1].id = "host1x";
0459 host->nresets = ARRAY_SIZE(host->resets);
0460
0461 err = devm_reset_control_bulk_get_optional_exclusive_released(
0462 host->dev, host->nresets, host->resets);
0463 if (err) {
0464 dev_err(host->dev, "failed to get reset: %d\n", err);
0465 return err;
0466 }
0467
0468 return 0;
0469 }
0470
static int host1x_probe(struct platform_device *pdev)
{
	struct host1x *host;
	int syncpt_irq;
	int err;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->info = of_device_get_match_data(&pdev->dev);

	/*
	 * Virtualization-capable SoCs split the register space into named
	 * apertures; older SoCs expose a single unnamed region.
	 */
	if (host->info->has_hypervisor) {
		host->regs = devm_platform_ioremap_resource_byname(pdev, "vm");
		if (IS_ERR(host->regs))
			return PTR_ERR(host->regs);

		host->hv_regs = devm_platform_ioremap_resource_byname(pdev, "hypervisor");
		if (IS_ERR(host->hv_regs))
			return PTR_ERR(host->hv_regs);

		if (host->info->has_common) {
			host->common_regs = devm_platform_ioremap_resource_byname(pdev, "common");
			if (IS_ERR(host->common_regs))
				return PTR_ERR(host->common_regs);
		}
	} else {
		host->regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(host->regs))
			return PTR_ERR(host->regs);
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0)
		return syncpt_irq;

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->dev->dma_parms = &host->dma_parms;
	dma_set_max_seg_size(host->dev, UINT_MAX);

	/* hook up the generation-specific function tables */
	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		err = PTR_ERR(host->clk);

		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock: %d\n", err);

		return err;
	}

	err = host1x_get_resets(host);
	if (err)
		return err;

	host1x_bo_cache_init(&host->cache);

	err = host1x_iommu_init(host);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
		goto destroy_cache;
	}

	err = host1x_channel_list_init(&host->channel_list,
				       host->info->nb_channels);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto iommu_exit;
	}

	err = host1x_memory_context_list_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize context list\n");
		goto free_channels;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto free_contexts;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto deinit_syncpt;
	}

	pm_runtime_enable(&pdev->dev);

	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (err)
		goto pm_disable;

	/* the driver's code isn't ready yet for the dynamic RPM */
	err = pm_runtime_resume_and_get(&pdev->dev);
	if (err)
		goto pm_disable;

	host1x_debug_init(host);

	err = host1x_register(host);
	if (err < 0)
		goto deinit_debugfs;

	err = devm_of_platform_populate(&pdev->dev);
	if (err < 0)
		goto unregister;

	return 0;

	/* error unwind: strictly reverse order of the setup above */
unregister:
	host1x_unregister(host);
deinit_debugfs:
	host1x_debug_deinit(host);

	pm_runtime_put_sync_suspend(&pdev->dev);
pm_disable:
	pm_runtime_disable(&pdev->dev);

	host1x_intr_deinit(host);
deinit_syncpt:
	host1x_syncpt_deinit(host);
free_contexts:
	host1x_memory_context_list_free(&host->context_list);
free_channels:
	host1x_channel_list_free(&host->channel_list);
iommu_exit:
	host1x_iommu_exit(host);
destroy_cache:
	host1x_bo_cache_destroy(&host->cache);

	return err;
}
0617
/* Tear down everything host1x_probe() set up, in reverse order. */
static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_unregister(host);
	host1x_debug_deinit(host);

	/* balances the resume_and_get in probe and disables runtime PM */
	pm_runtime_force_suspend(&pdev->dev);

	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	host1x_memory_context_list_free(&host->context_list);
	host1x_channel_list_free(&host->channel_list);
	host1x_iommu_exit(host);
	host1x_bo_cache_destroy(&host->cache);

	return 0;
}
0636
/*
 * Runtime suspend: quiesce interrupts, save syncpoint state, assert the
 * resets and gate the clock. On failure to assert reset, the hardware
 * state saved above is restored so the device keeps working.
 */
static int __maybe_unused host1x_runtime_suspend(struct device *dev)
{
	struct host1x *host = dev_get_drvdata(dev);
	int err;

	host1x_intr_stop(host);
	host1x_syncpt_save(host);

	err = reset_control_bulk_assert(host->nresets, host->resets);
	if (err) {
		dev_err(dev, "failed to assert reset: %d\n", err);
		goto resume_host1x;
	}

	/* brief settle time with resets asserted — TODO confirm requirement */
	usleep_range(1000, 2000);

	clk_disable_unprepare(host->clk);
	/* release the resets so other reset users may acquire them */
	reset_control_bulk_release(host->nresets, host->resets);

	return 0;

resume_host1x:
	host1x_setup_virtualization_tables(host);
	host1x_syncpt_restore(host);
	host1x_intr_start(host);

	return err;
}
0665
/*
 * Runtime resume: re-acquire the resets, ungate the clock, deassert the
 * resets and reprogram the hardware state lost across suspend
 * (virtualization tables, syncpoints, interrupts).
 */
static int __maybe_unused host1x_runtime_resume(struct device *dev)
{
	struct host1x *host = dev_get_drvdata(dev);
	int err;

	err = reset_control_bulk_acquire(host->nresets, host->resets);
	if (err) {
		dev_err(dev, "failed to acquire reset: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(host->clk);
	if (err) {
		dev_err(dev, "failed to enable clock: %d\n", err);
		goto release_reset;
	}

	err = reset_control_bulk_deassert(host->nresets, host->resets);
	if (err < 0) {
		dev_err(dev, "failed to deassert reset: %d\n", err);
		goto disable_clk;
	}

	host1x_setup_virtualization_tables(host);
	host1x_syncpt_restore(host);
	host1x_intr_start(host);

	return 0;

disable_clk:
	clk_disable_unprepare(host->clk);
release_reset:
	reset_control_bulk_release(host->nresets, host->resets);

	return err;
}
0702
static const struct dev_pm_ops host1x_pm_ops = {
	SET_RUNTIME_PM_OPS(host1x_runtime_suspend, host1x_runtime_resume,
			   NULL)
	/* TODO: add system suspend-resume once driver will be ready for that */
};
0708
/* Platform driver glue: DT matching, probe/remove and runtime PM hooks. */
static struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
		.pm = &host1x_pm_ops,
	},
	.probe = host1x_probe,
	.remove = host1x_remove,
};
0718
/* Drivers registered/unregistered together in module init/exit below. */
static struct platform_driver * const drivers[] = {
	&tegra_host1x_driver,
	&tegra_mipi_driver,
};
0723
/*
 * Module init: register the host1x bus type before the platform drivers
 * so that probing devices can attach to the bus; unwind the bus
 * registration if driver registration fails.
 */
static int __init tegra_host1x_init(void)
{
	int err;

	err = bus_register(&host1x_bus_type);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		bus_unregister(&host1x_bus_type);

	return err;
}
module_init(tegra_host1x_init);
0739
/* Module exit: reverse order of tegra_host1x_init(). */
static void __exit tegra_host1x_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);
0746
0747
0748
0749
0750
0751
0752
0753
/**
 * host1x_get_dma_mask() - query the supported DMA mask for host1x
 * @host1x: host1x instance
 *
 * Note that this returns the supported DMA mask for host1x, which can be
 * different from the applied DMA mask if anything has restricted it.
 */
u64 host1x_get_dma_mask(struct host1x *host1x)
{
	return host1x->info->dma_mask;
}
EXPORT_SYMBOL(host1x_get_dma_mask);
0759
0760 MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
0761 MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
0762 MODULE_DESCRIPTION("Host1x driver for Tegra products");
0763 MODULE_LICENSE("GPL");