// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 */

#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__

#include "fsl_pamu_domain.h"

#include <linux/platform_device.h>
#include <sysdev/fsl_pci.h>

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);

struct iommu_device pamu_iommu;	/* IOMMU core code handle */

static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}

static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
						  sizeof(struct fsl_dma_domain),
						  0,
						  SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}

static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_update_paace_stash(liodn, val);
	if (ret)
		pr_debug("Failed to update SPAACE for liodn %d\n", liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
			  int liodn)
{
	u32 omi_index = ~(u32)0;
	unsigned long flags;
	int ret;

	/*
	 * Configure the omi_index at the geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (ret)
		goto out_unlock;
	ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0);
	if (ret)
		goto out_unlock;
	ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id,
				 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
out_unlock:
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret)
		pr_debug("PAACE configuration failed for liodn %d\n", liodn);
	return ret;
}
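
/*
 * A note on the two pamu_config_ppaace() calls above, as I read the PAMU
 * table code (this is an interpretation, not documented behavior): the
 * first call programs only the operation mapping index and stash ID,
 * while the second call (passing an invalid OMI of ~(u32)0) grants read
 * (PAACE_AP_PERMS_QUERY) and write (PAACE_AP_PERMS_UPDATE) access,
 * which is what makes the PAACE entry usable. Consult
 * pamu_config_ppaace() in fsl_pamu.c for the authoritative details.
 */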

static void remove_device_ref(struct device_domain_info *info)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
	struct device_domain_info *info, *old_domain_info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	/*
	 * Check if the device is already attached to a domain.
	 * If it is attached to a different domain, detach it first.
	 */
	old_domain_info = dev_iommu_priv_get(dev);
	if (old_domain_info && old_domain_info->domain != dma_domain) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		detach_device(dev, old_domain_info->domain);
		spin_lock_irqsave(&device_domain_lock, flags);
	}

	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
	if (!info) {
		/* Cannot track the device; bail out rather than deref NULL */
		spin_unlock_irqrestore(&device_domain_lock, flags);
		return;
	}

	info->dev = dev;
	info->liodn = liodn;
	info->domain = dma_domain;

	list_add(&info->link, &dma_domain->devices);
	/*
	 * For devices with multiple LIODNs, store the info only for the
	 * first LIODN; all subsequent LIODNs share the same domain.
	 */
	if (!dev_iommu_priv_get(dev))
		dev_iommu_priv_set(dev, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;
	return iova;
}
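
/*
 * PAMU in this driver keeps no per-page translations: the PAACE entries
 * programmed in pamu_set_liodn() cover the whole aperture without
 * remapping, so a valid IOVA translates 1:1 to the physical address.
 * A hypothetical caller going through the core API would see:
 *
 *	phys_addr_t p = iommu_iova_to_phys(dom, 0x1000);
 *	// p == 0x1000, provided 0x1000 lies inside the aperture
 */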

static bool fsl_pamu_capable(enum iommu_cap cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);
	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}

static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
	struct fsl_dma_domain *dma_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
	if (!dma_domain)
		return NULL;

	dma_domain->stash_id = ~(u32)0;
	INIT_LIST_HEAD(&dma_domain->devices);
	spin_lock_init(&dma_domain->domain_lock);

	/* default geometry 64 GB i.e. maximum system address */
	dma_domain->iommu_domain.geometry.aperture_start = 0;
	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
	dma_domain->iommu_domain.geometry.force_aperture = true;

	return &dma_domain->iommu_domain;
}
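
/*
 * Illustrative only: allocation is reached through the core IOMMU API,
 * which requests an IOMMU_DOMAIN_UNMANAGED domain (the only type
 * supported above). A hypothetical caller:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *	if (!dom)
 *		return -ENOMEM;
 *
 * The resulting domain's aperture spans the full 36-bit (64 GiB)
 * address range that PAMU can cover.
 */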

/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn_stash(info->liodn, dma_domain, val);
		if (ret)
			break;
	}

	return ret;
}

static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int len, ret = 0, i;
	const u32 *liodn;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use the LIODN of the PCI controller when attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * Make dev point to the PCI controller device so
		 * we can pick up the LIODN programmed by U-Boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (!liodn) {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < len / sizeof(u32); i++) {
		/* Ensure that the LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
		if (ret)
			break;
		ret = pamu_enable_liodn(liodn[i]);
		if (ret)
			break;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
	return ret;
}
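
/*
 * For reference, a device-tree node consumed by the lookup above might
 * carry the LIODN property as follows (node name and values are
 * illustrative only):
 *
 *	dma@100300 {
 *		compatible = "fsl,eloplus-dma";
 *		fsl,liodn = <193>;
 *	};
 *
 * For PCI devices the property is read from the PCI controller node
 * instead, since dev is redirected to pci_ctl->parent above.
 */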

static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use the LIODN of the PCI controller when detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * Make dev point to the PCI controller device so
		 * we can pick up the LIODN programmed by U-Boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
}

/* Set the domain stash attribute */
int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}
	ret = update_domain_stash(dma_domain, dma_domain->stash_id);
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
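
/*
 * Example (hypothetical caller): after attaching a device to an
 * unmanaged domain, direct DMA stashing at CPU 0's L1 cache:
 *
 *	if (fsl_pamu_configure_l1_stash(dom, 0))
 *		pr_err("failed to set stash target\n");
 *
 * get_stash_id() resolves the CPU's L1 cache stash ID from the device
 * tree; ~(u32)0 signals that no such stash target exists.
 */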

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading the BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If the PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}

/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the pci bus device list to get
	 * the shared iommu group.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		bus = bus->parent;
	}

	return NULL;
}

static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices, so assign a device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * The PCIe controller is not a partitionable entity,
		 * so iommu_group_remove_device() should be called
		 * for the PCIe controller.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controller's device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}

static struct iommu_group *fsl_pamu_device_group(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-ENODEV);
	int len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev_is_pci(dev))
		group = get_pci_device_group(to_pci_dev(dev));
	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
		group = get_device_iommu_group(dev);

	return group;
}

static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
	return &pamu_iommu;
}

static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.probe_device	= fsl_pamu_probe_device,
	.device_group	= fsl_pamu_device_group,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= fsl_pamu_attach_device,
		.detach_dev	= fsl_pamu_detach_device,
		.iova_to_phys	= fsl_pamu_iova_to_phys,
		.free		= fsl_pamu_domain_free,
	}
};

int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
	if (ret)
		return ret;

	ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL);
	if (ret) {
		iommu_device_sysfs_remove(&pamu_iommu);
		pr_err("Can't register iommu device\n");
		return ret;
	}

	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

	return ret;
}
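
/*
 * Once pamu_domain_init() has run, the platform and PCI buses route
 * IOMMU operations through fsl_pamu_ops. A minimal sketch of what a
 * consumer could then do (hypothetical, error handling elided):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *	int err = dom ? iommu_attach_device(dom, &pdev->dev) : -ENOMEM;
 *
 * When pamu_domain_init() itself runs is up to the PAMU driver's setup
 * path (see fsl_pamu.c); it is presumed to be called exactly once after
 * the PAMU hardware has been initialized.
 */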