// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Intel Corporation.
 * Copyright 2018 Google LLC.
 */

#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/set_memory.h>

#include "ipu3-mmu.h"

#define IPU3_PT_BITS		10
#define IPU3_PT_PTES		(1UL << IPU3_PT_BITS)
#define IPU3_PT_SIZE		(IPU3_PT_PTES << 2)
#define IPU3_PT_ORDER		(IPU3_PT_SIZE >> PAGE_SHIFT)

#define IPU3_ADDR2PTE(addr)	((addr) >> IPU3_PAGE_SHIFT)
#define IPU3_PTE2ADDR(pte)	((phys_addr_t)(pte) << IPU3_PAGE_SHIFT)

#define IPU3_L2PT_SHIFT		IPU3_PT_BITS
#define IPU3_L2PT_MASK		((1UL << IPU3_L2PT_SHIFT) - 1)

#define IPU3_L1PT_SHIFT		IPU3_PT_BITS
#define IPU3_L1PT_MASK		((1UL << IPU3_L1PT_SHIFT) - 1)

#define IPU3_MMU_ADDRESS_BITS	(IPU3_PAGE_SHIFT + \
				 IPU3_L2PT_SHIFT + \
				 IPU3_L1PT_SHIFT)
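
/*
 * Each IOVA is translated through a two-level page table: from the least
 * significant bit upwards it splits into a page offset (IPU3_PAGE_SHIFT
 * bits, from ipu3-mmu.h), a 10-bit L2 page table index and a 10-bit L1
 * page table index, giving IPU3_MMU_ADDRESS_BITS of addressable space.
 * For example, assuming IPU3_PAGE_SHIFT is 12 (4 KiB pages), IOVA
 * 0x12345678 decodes to L1 index 0x48, L2 index 0x345 and offset 0x678.
 */
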
#define IMGU_REG_BASE		0x4000
#define REG_TLB_INVALIDATE	(IMGU_REG_BASE + 0x300)
#define TLB_INVALIDATE		1
#define REG_L1_PHYS		(IMGU_REG_BASE + 0x304)
#define REG_GP_HALT		(IMGU_REG_BASE + 0x5dc)
#define REG_GP_HALTED		(IMGU_REG_BASE + 0x5e0)

struct imgu_mmu {
	struct device *dev;
	void __iomem *base;
	/* protect access to l2pts, l1pt */
	spinlock_t lock;

	void *dummy_page;
	u32 dummy_page_pteval;

	u32 *dummy_l2pt;
	u32 dummy_l2pt_pteval;

	u32 **l2pts;
	u32 *l1pt;

	struct imgu_mmu_info geometry;
};

static inline struct imgu_mmu *to_imgu_mmu(struct imgu_mmu_info *info)
{
	return container_of(info, struct imgu_mmu, geometry);
}

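/**
 * imgu_mmu_tlb_invalidate - invalidate translation look-aside buffer
 * @mmu: MMU to perform the invalidate operation on
 *
 * This function invalidates the whole TLB. Must be called when the hardware
 * is powered on.
 */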
static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
{
	writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
}

static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
				    void (*func)(struct imgu_mmu *mmu))
{
	if (!pm_runtime_get_if_in_use(mmu->dev))
		return;

	func(mmu);
	pm_runtime_put(mmu->dev);
}

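/**
 * imgu_mmu_set_halt - set CIO gate halt bit
 * @mmu: MMU to set the CIO gate bit in.
 * @halt: Desired state of the gate bit.
 *
 * This function sets the CIO gate bit that controls whether external memory
 * access is allowed. Must be called when the hardware is powered on.
 */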
static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt)
{
	int ret;
	u32 val;

	writel(halt, mmu->base + REG_GP_HALT);
	ret = readl_poll_timeout(mmu->base + REG_GP_HALTED,
				 val, (val & 1) == halt, 1000, 100000);

	if (ret)
		dev_err(mmu->dev, "failed to %s CIO gate halt\n",
			halt ? "set" : "clear");
}

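/**
 * imgu_mmu_alloc_page_table - allocate a pre-filled page table
 * @pteval: Value to initialize the page table entries with.
 *
 * Return: Pointer to the allocated page table or NULL on failure.
 */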
static u32 *imgu_mmu_alloc_page_table(u32 pteval)
{
	u32 *pt;
	int pte;

	pt = (u32 *)__get_free_page(GFP_KERNEL);
	if (!pt)
		return NULL;

	for (pte = 0; pte < IPU3_PT_PTES; pte++)
		pt[pte] = pteval;

	set_memory_uc((unsigned long)pt, IPU3_PT_ORDER);

	return pt;
}

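/**
 * imgu_mmu_free_page_table - free a page table
 * @pt: Page table to free.
 */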
static void imgu_mmu_free_page_table(u32 *pt)
{
	set_memory_wb((unsigned long)pt, IPU3_PT_ORDER);
	free_page((unsigned long)pt);
}

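/**
 * address_to_pte_idx - split an IOVA into L1 and L2 page table indices
 * @iova: IOVA to split.
 * @l1pt_idx: Output for the L1 page table index.
 * @l2pt_idx: Output for the L2 page table index.
 */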
static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx,
				      u32 *l2pt_idx)
{
	iova >>= IPU3_PAGE_SHIFT;

	if (l2pt_idx)
		*l2pt_idx = iova & IPU3_L2PT_MASK;

	iova >>= IPU3_L2PT_SHIFT;

	if (l1pt_idx)
		*l1pt_idx = iova & IPU3_L1PT_MASK;
}

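/**
 * imgu_mmu_get_l2pt - get a level 2 page table
 * @mmu: MMU to get the L2 page table from
 * @l1pt_idx: index into the L1 page table
 *
 * Returns the existing L2 page table for @l1pt_idx, or allocates a new one
 * (dropping the lock around the allocation and re-checking afterwards) and
 * hooks it into the L1 page table. Returns NULL on allocation failure.
 */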
static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
{
	unsigned long flags;
	u32 *l2pt, *new_l2pt;
	u32 pteval;

	spin_lock_irqsave(&mmu->lock, flags);

	l2pt = mmu->l2pts[l1pt_idx];
	if (l2pt) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return l2pt;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	new_l2pt = imgu_mmu_alloc_page_table(mmu->dummy_page_pteval);
	if (!new_l2pt)
		return NULL;

	spin_lock_irqsave(&mmu->lock, flags);

	dev_dbg(mmu->dev, "allocated page table %p for l1pt_idx %u\n",
		new_l2pt, l1pt_idx);

	l2pt = mmu->l2pts[l1pt_idx];
	if (l2pt) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		imgu_mmu_free_page_table(new_l2pt);
		return l2pt;
	}

	l2pt = new_l2pt;
	mmu->l2pts[l1pt_idx] = new_l2pt;

	pteval = IPU3_ADDR2PTE(virt_to_phys(new_l2pt));
	mmu->l1pt[l1pt_idx] = pteval;

	spin_unlock_irqrestore(&mmu->lock, flags);
	return l2pt;
}

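/*
 * Install a single page table entry mapping @iova to @paddr. Returns
 * -EBUSY if the entry is already in use, i.e. does not point at the
 * dummy page.
 */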
static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
			  phys_addr_t paddr)
{
	u32 l1pt_idx, l2pt_idx;
	unsigned long flags;
	u32 *l2pt;

	if (!mmu)
		return -ENODEV;

	address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

	l2pt = imgu_mmu_get_l2pt(mmu, l1pt_idx);
	if (!l2pt)
		return -ENOMEM;

	spin_lock_irqsave(&mmu->lock, flags);

	if (l2pt[l2pt_idx] != mmu->dummy_page_pteval) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return -EBUSY;
	}

	l2pt[l2pt_idx] = IPU3_ADDR2PTE(paddr);

	spin_unlock_irqrestore(&mmu->lock, flags);

	return 0;
}

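/**
 * imgu_mmu_map - map a buffer to an iova
 * @info: MMU mappable range
 * @iova: the virtual address
 * @paddr: the physical address
 * @size: length of the mappable area
 *
 * The iova, paddr and size must all be aligned to IPU3_PAGE_SIZE. The
 * range is mapped one page at a time; mapping stops at the first error.
 */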
int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
		 phys_addr_t paddr, size_t size)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	int ret = 0;

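	/*
	 * Both the virtual address and the physical one, as well as the
	 * size of the mapping, must be aligned (at least) to the size of
	 * the smallest page supported by the hardware.
	 */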
	if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {
		dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx\n",
			iova, &paddr, size);
		return -EINVAL;
	}

	dev_dbg(mmu->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
		iova, &paddr, size);

	while (size) {
		dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n", iova, &paddr);

		ret = __imgu_mmu_map(mmu, iova, paddr);
		if (ret)
			break;

		iova += IPU3_PAGE_SIZE;
		paddr += IPU3_PAGE_SIZE;
		size -= IPU3_PAGE_SIZE;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

	return ret;
}

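/**
 * imgu_mmu_map_sg - map a scatterlist
 * @info: MMU mappable range
 * @iova: the virtual address
 * @sg: the scatterlist to map
 * @nents: number of entries in the scatterlist
 *
 * Returns the number of bytes mapped, or 0 on failure, in which case any
 * partial mapping is undone.
 */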
size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
		       struct scatterlist *sg, unsigned int nents)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	struct scatterlist *s;
	size_t s_length, mapped = 0;
	unsigned int i;
	int ret;

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		s_length = s->length;

		if (!IS_ALIGNED(s->offset, IPU3_PAGE_SIZE))
			goto out_err;

		/* only the last entry may have an unaligned length */
		if (i == nents - 1 && !IS_ALIGNED(s->length, IPU3_PAGE_SIZE))
			s_length = PAGE_ALIGN(s->length);

		ret = imgu_mmu_map(info, iova + mapped, phys, s_length);
		if (ret)
			goto out_err;

		mapped += s_length;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

	return mapped;

out_err:
	/* undo mappings already done */
	imgu_mmu_unmap(info, iova, mapped);

	return 0;
}

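/*
 * Clear the page table entry for @iova by pointing it back at the dummy
 * page. Returns @size if an entry was cleared, 0 if the address was not
 * mapped.
 */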
static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu,
			       unsigned long iova, size_t size)
{
	u32 l1pt_idx, l2pt_idx;
	unsigned long flags;
	size_t unmap = size;
	u32 *l2pt;

	if (!mmu)
		return 0;

	address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

	spin_lock_irqsave(&mmu->lock, flags);

	l2pt = mmu->l2pts[l1pt_idx];
	if (!l2pt) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return 0;
	}

	if (l2pt[l2pt_idx] == mmu->dummy_page_pteval)
		unmap = 0;

	l2pt[l2pt_idx] = mmu->dummy_page_pteval;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return unmap;
}

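/**
 * imgu_mmu_unmap - unmap a buffer
 * @info: MMU mappable range
 * @iova: the virtual address
 * @size: the length of the buffer
 *
 * Both @iova and @size must be aligned to IPU3_PAGE_SIZE. Returns the
 * number of bytes actually unmapped.
 */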
size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
		      size_t size)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	size_t unmapped_page, unmapped = 0;

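	/*
	 * The virtual address and the size of the mapping must be aligned
	 * (at least) to the size of the smallest page supported by the
	 * hardware.
	 */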
	if (!IS_ALIGNED(iova | size, IPU3_PAGE_SIZE)) {
		dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx\n",
			iova, size);
		return -EINVAL;
	}

	dev_dbg(mmu->dev, "unmap this: iova 0x%lx size 0x%zx\n", iova, size);

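	/*
	 * Keep iterating until we either unmap 'size' bytes or we hit an
	 * area that is not mapped.
	 */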
	while (unmapped < size) {
		unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
		if (!unmapped_page)
			break;

		dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n",
			iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

	return unmapped;
}

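/**
 * imgu_mmu_init() - initialize IPU3 MMU block
 * @parent: struct device parent
 * @base: IOMEM base of hardware registers.
 *
 * Return: Pointer to IPU3 MMU private data or ERR_PTR() on error.
 */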
struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base)
{
	struct imgu_mmu *mmu;
	u32 pteval;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->dev = parent;
	mmu->base = base;
	spin_lock_init(&mmu->lock);

	/* Disallow external memory access while there are no valid page tables. */
	imgu_mmu_set_halt(mmu, true);

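	/*
	 * The MMU does not have a "valid" bit, so we have to use a dummy
	 * page for invalid entries.
	 */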
	mmu->dummy_page = (void *)__get_free_page(GFP_KERNEL);
	if (!mmu->dummy_page)
		goto fail_group;
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_page));
	mmu->dummy_page_pteval = pteval;

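	/*
	 * Allocate a dummy L2 page table with all entries pointing to
	 * the dummy page.
	 */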
	mmu->dummy_l2pt = imgu_mmu_alloc_page_table(pteval);
	if (!mmu->dummy_l2pt)
		goto fail_dummy_page;
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_l2pt));
	mmu->dummy_l2pt_pteval = pteval;

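	/*
	 * Allocate the array of L2 page table CPU pointers, initialized to
	 * zero, which means the dummy L2 page table allocated above.
	 */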
	mmu->l2pts = vzalloc(IPU3_PT_PTES * sizeof(*mmu->l2pts));
	if (!mmu->l2pts)
		goto fail_l2pt;

	/* Allocate the L1 page table. */
	mmu->l1pt = imgu_mmu_alloc_page_table(mmu->dummy_l2pt_pteval);
	if (!mmu->l1pt)
		goto fail_l2pts;

	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
	writel(pteval, mmu->base + REG_L1_PHYS);
	imgu_mmu_tlb_invalidate(mmu);
	imgu_mmu_set_halt(mmu, false);

	mmu->geometry.aperture_start = 0;
	mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);

	return &mmu->geometry;

fail_l2pts:
	vfree(mmu->l2pts);
fail_l2pt:
	imgu_mmu_free_page_table(mmu->dummy_l2pt);
fail_dummy_page:
	free_page((unsigned long)mmu->dummy_page);
fail_group:
	kfree(mmu);

	return ERR_PTR(-ENOMEM);
}

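/**
 * imgu_mmu_exit() - clean up IPU3 MMU block
 * @info: IPU3 MMU private data
 */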
void imgu_mmu_exit(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);

	/* We're going to free our page tables, no more memory access. */
	imgu_mmu_set_halt(mmu, true);
	imgu_mmu_tlb_invalidate(mmu);

	imgu_mmu_free_page_table(mmu->l1pt);
	vfree(mmu->l2pts);
	imgu_mmu_free_page_table(mmu->dummy_l2pt);
	free_page((unsigned long)mmu->dummy_page);
	kfree(mmu);
}

void imgu_mmu_suspend(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);

	imgu_mmu_set_halt(mmu, true);
}

void imgu_mmu_resume(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	u32 pteval;

	imgu_mmu_set_halt(mmu, true);

	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
	writel(pteval, mmu->base + REG_L1_PHYS);

	imgu_mmu_tlb_invalidate(mmu);
	imgu_mmu_set_halt(mmu, false);
}