// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Graphics Address Relocation Table on Tegra20
 *
 * Author: Hiroshi DOYU <hdoyu@nvidia.com>
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
 */

#define dev_fmt(fmt)	"gart: " fmt

#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include <soc/tegra/mc.h>

#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)

#define GART_ENTRY_PHYS_ADDR_VALID	BIT(31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK		GENMASK(30, GART_PAGE_SHIFT)

#define GART_IOMMU_PGSIZES	(GART_PAGE_SIZE)

struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	unsigned long		iovmm_base;	/* offset to vmm_area start */
	unsigned long		iovmm_end;	/* offset to vmm_area end */
	spinlock_t		pte_lock;	/* for pagetable */
	spinlock_t		dom_lock;	/* for active domain */
	unsigned int		active_devices;	/* number of active devices */
	struct iommu_domain	*active_domain;	/* current active domain */
	struct iommu_device	iommu;		/* IOMMU Core handle */
	struct device		*dev;
};

static struct gart_device *gart_handle; /* unique for a system */

static bool gart_debug;

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	readl_relaxed((gart)->regs + GART_CONFIG)

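/* Walk every 4KiB page slot covered by the GART's IOVA aperture. */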
#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_end;					\
	     iova += GART_PAGE_SIZE)

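/*
 * The GART exposes its page table through an indirect register pair:
 * the IOVA written to GART_ENTRY_ADDR selects the slot, and the PTE is
 * then accessed through GART_ENTRY_DATA. Each 32-bit PTE holds the
 * physical page frame in bits 30:12 with bit 31 as the valid flag.
 */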
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long iova, unsigned long pte)
{
	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	writel_relaxed(pte, gart->regs + GART_ENTRY_DATA);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long iova)
{
	unsigned long pte;

	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	pte = readl_relaxed(gart->regs + GART_ENTRY_DATA);

	return pte;
}

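/*
 * Program every PTE in the aperture (from @data if provided, otherwise
 * cleared), then enable address translation and flush the posted
 * writes with a GART_CONFIG read-back.
 */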
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel_relaxed(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

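/*
 * The hardware supports only 4KiB pages, so each map/unmap request must
 * be exactly GART_PAGE_SIZE and must fall inside the IOVA aperture.
 */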
static inline bool gart_iova_range_invalid(struct gart_device *gart,
					   unsigned long iova, size_t bytes)
{
	return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE ||
			iova + bytes > gart->iovmm_end);
}

static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova)
{
	return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID);
}

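/*
 * There is a single translation table in hardware, hence only one
 * domain can be active at a time: attaching a device while a different
 * domain is active fails with -EBUSY, and a reference count tracks how
 * many devices share the active domain.
 */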
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_device *gart = gart_handle;
	int ret = 0;

	spin_lock(&gart->dom_lock);

	if (gart->active_domain && gart->active_domain != domain) {
		ret = -EBUSY;
	} else if (dev_iommu_priv_get(dev) != domain) {
		dev_iommu_priv_set(dev, domain);
		gart->active_domain = domain;
		gart->active_devices++;
	}

	spin_unlock(&gart->dom_lock);

	return ret;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_device *gart = gart_handle;

	spin_lock(&gart->dom_lock);

	if (dev_iommu_priv_get(dev) == domain) {
		dev_iommu_priv_set(dev, NULL);

		if (--gart->active_devices == 0)
			gart->active_domain = NULL;
	}

	spin_unlock(&gart->dom_lock);
}

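/*
 * Only unmanaged domains are supported. The domain geometry is pinned
 * to the hardware aperture (force_aperture) so that callers cannot be
 * handed IOVAs the GART is unable to translate.
 */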
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (domain) {
		domain->geometry.aperture_start = gart_handle->iovmm_base;
		domain->geometry.aperture_end = gart_handle->iovmm_end - 1;
		domain->geometry.force_aperture = true;
	}

	return domain;
}

static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	WARN_ON(gart_handle->active_domain == domain);
	kfree(domain);
}

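/*
 * With the gart_debug module parameter set, mapping over an already
 * valid PTE is refused so that accidental double maps are caught
 * instead of being silently overwritten.
 */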
static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
				   unsigned long pa)
{
	if (unlikely(gart_debug && gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is in-use\n");
		return -EINVAL;
	}

	gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa);

	return 0;
}

static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
	struct gart_device *gart = gart_handle;
	int ret;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
	spin_unlock(&gart->pte_lock);

	return ret;
}

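/*
 * Unmapping simply clears the PTE. In debug mode, clearing an entry
 * that is not currently valid is reported as an error.
 */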
static inline int __gart_iommu_unmap(struct gart_device *gart,
				     unsigned long iova)
{
	if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is invalid\n");
		return -EINVAL;
	}

	gart_set_pte(gart, iova, 0);

	return 0;
}

static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes, struct iommu_iotlb_gather *gather)
{
	struct gart_device *gart = gart_handle;
	int err;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return 0;

	spin_lock(&gart->pte_lock);
	err = __gart_iommu_unmap(gart, iova);
	spin_unlock(&gart->pte_lock);

	return err ? 0 : bytes;
}

static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_device *gart = gart_handle;
	unsigned long pte;

	if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	pte = gart_read_pte(gart, iova);
	spin_unlock(&gart->pte_lock);

	return pte & GART_PAGE_MASK;
}

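/*
 * Only devices that reference the GART through an OF "iommus" phandle
 * (i.e. that have an IOMMU fwspec) are taken over by this driver.
 */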
static struct iommu_device *gart_iommu_probe_device(struct device *dev)
{
	if (!dev_iommu_fwspec_get(dev))
		return ERR_PTR(-ENODEV);

	return &gart_handle->iommu;
}

static int gart_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	return 0;
}

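/*
 * PTE updates are posted MMIO writes; reading GART_CONFIG back forces
 * them out to the hardware, so a single register read serves as the
 * "TLB" sync for both the map and unmap paths.
 */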
static void gart_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
				size_t size)
{
	FLUSH_GART_REGS(gart_handle);
}

static void gart_iommu_sync(struct iommu_domain *domain,
			    struct iommu_iotlb_gather *gather)
{
	size_t length = gather->end - gather->start + 1;

	gart_iommu_sync_map(domain, gather->start, length);
}

static const struct iommu_ops gart_iommu_ops = {
	.domain_alloc	= gart_iommu_domain_alloc,
	.probe_device	= gart_iommu_probe_device,
	.device_group	= generic_device_group,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
	.of_xlate	= gart_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= gart_iommu_attach_dev,
		.detach_dev	= gart_iommu_detach_dev,
		.map		= gart_iommu_map,
		.unmap		= gart_iommu_unmap,
		.iova_to_phys	= gart_iommu_iova_to_phys,
		.iotlb_sync_map	= gart_iommu_sync_map,
		.iotlb_sync	= gart_iommu_sync,
		.free		= gart_iommu_domain_free,
	}
};

int tegra_gart_suspend(struct gart_device *gart)
{
	u32 *data = gart->savedata;
	unsigned long iova;

	/*
	 * All GART users shall be suspended at this point. Disable
	 * address translation to trap all GART accesses as invalid
	 * memory accesses.
	 */
	writel_relaxed(0, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);

	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);

	return 0;
}

int tegra_gart_resume(struct gart_device *gart)
{
	do_gart_setup(gart, gart->savedata);

	return 0;
}

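/*
 * Probe relies on the memory controller: the GART registers live at an
 * offset within the MC register window, while the second MEM resource
 * of the platform device describes the IOVA aperture. One u32 of save
 * area is allocated per aperture page so the table can be preserved
 * across suspend/resume.
 */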
struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
	struct gart_device *gart;
	struct resource *res;
	int err;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(dev, "Memory aperture resource unavailable\n");
		return ERR_PTR(-ENXIO);
	}

	gart = kzalloc(sizeof(*gart), GFP_KERNEL);
	if (!gart)
		return ERR_PTR(-ENOMEM);

	gart_handle = gart;

	gart->dev = dev;
	gart->regs = mc->regs + GART_REG_BASE;
	gart->iovmm_base = res->start;
	gart->iovmm_end = res->end + 1;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->dom_lock);

	do_gart_setup(gart, NULL);

	err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
	if (err)
		goto free_gart;

	err = iommu_device_register(&gart->iommu, &gart_iommu_ops, dev);
	if (err)
		goto remove_sysfs;

	gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE *
				 sizeof(u32));
	if (!gart->savedata) {
		err = -ENOMEM;
		goto unregister_iommu;
	}

	return gart;

unregister_iommu:
	iommu_device_unregister(&gart->iommu);
remove_sysfs:
	iommu_device_sysfs_remove(&gart->iommu);
free_gart:
	kfree(gart);

	return ERR_PTR(err);
}

module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");
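
/*
 * The 0644 permissions expose gart_debug as a writable parameter under
 * /sys/module/<module>/parameters/, so the PTE sanity checks in
 * __gart_iommu_map()/__gart_iommu_unmap() can be toggled at runtime
 * (the exact module name depends on how this file is built into the
 * kernel).
 */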