/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
0022 #define NVKM_VMM_LEVELS_MAX 5
0023 #include "vmm.h"
0024
0025 #include <subdev/fb.h>
0026
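/* Free a software page-table node, including the PDE pointer array used
 * by PGD-type levels.
 */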
0027 static void
0028 nvkm_vmm_pt_del(struct nvkm_vmm_pt **ppgt)
0029 {
0030 struct nvkm_vmm_pt *pgt = *ppgt;
0031 if (pgt) {
0032 kvfree(pgt->pde);
0033 kfree(pgt);
0034 *ppgt = NULL;
0035 }
0036 }
0037
0038
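/* Allocate a software page-table node.  Dual-page levels (SPT/LPT) get a
 * per-large-PTE state byte array appended to the structure; PGD levels
 * also get an array of PDE pointers, one per possible entry.
 */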
0039 static struct nvkm_vmm_pt *
0040 nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
0041 const struct nvkm_vmm_page *page)
0042 {
0043 const u32 pten = 1 << desc->bits;
0044 struct nvkm_vmm_pt *pgt;
0045 u32 lpte = 0;
0046
0047 if (desc->type > PGT) {
0048 if (desc->type == SPT) {
0049 const struct nvkm_vmm_desc *pair = page[-1].desc;
0050 lpte = pten >> (desc->bits - pair->bits);
0051 } else {
0052 lpte = pten;
0053 }
0054 }
0055
0056 if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL)))
0057 return NULL;
0058 pgt->page = page ? page->shift : 0;
0059 pgt->sparse = sparse;
0060
0061 if (desc->type == PGD) {
0062 pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);
0063 if (!pgt->pde) {
0064 kfree(pgt);
0065 return NULL;
0066 }
0067 }
0068
0069 return pgt;
0070 }
0071
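/* Walker state for nvkm_vmm_iter(): page/level descriptors, remaining PTE
 * count, per-level PTE indices and software PT pointers for the current
 * position, plus the level depth recorded for a pending TLB flush.
 */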
0072 struct nvkm_vmm_iter {
0073 const struct nvkm_vmm_page *page;
0074 const struct nvkm_vmm_desc *desc;
0075 struct nvkm_vmm *vmm;
0076 u64 cnt;
0077 u16 max, lvl;
0078 u32 pte[NVKM_VMM_LEVELS_MAX];
0079 struct nvkm_vmm_pt *pt[NVKM_VMM_LEVELS_MAX];
0080 int flush;
0081 };
0082
0083 #ifdef CONFIG_NOUVEAU_DEBUG_MMU
0084 static const char *
0085 nvkm_vmm_desc_type(const struct nvkm_vmm_desc *desc)
0086 {
0087 switch (desc->type) {
0088 case PGD: return "PGD";
0089 case PGT: return "PGT";
0090 case SPT: return "SPT";
0091 case LPT: return "LPT";
0092 default:
0093 return "UNKNOWN";
0094 }
0095 }
0096
0097 static void
0098 nvkm_vmm_trace(struct nvkm_vmm_iter *it, char *buf)
0099 {
0100 int lvl;
0101 for (lvl = it->max; lvl >= 0; lvl--) {
0102 if (lvl >= it->lvl)
0103 buf += sprintf(buf, "%05x:", it->pte[lvl]);
0104 else
0105 buf += sprintf(buf, "xxxxx:");
0106 }
0107 }
0108
0109 #define TRA(i,f,a...) do { \
0110 char _buf[NVKM_VMM_LEVELS_MAX * 7]; \
0111 struct nvkm_vmm_iter *_it = (i); \
0112 nvkm_vmm_trace(_it, _buf); \
0113 VMM_TRACE(_it->vmm, "%s "f, _buf, ##a); \
0114 } while(0)
0115 #else
0116 #define TRA(i,f,a...)
0117 #endif
0118
0119 static inline void
0120 nvkm_vmm_flush_mark(struct nvkm_vmm_iter *it)
0121 {
0122 it->flush = min(it->flush, it->max - it->lvl);
0123 }
0124
0125 static inline void
0126 nvkm_vmm_flush(struct nvkm_vmm_iter *it)
0127 {
0128 if (it->flush != NVKM_VMM_LEVELS_MAX) {
0129 if (it->vmm->func->flush) {
0130 TRA(it, "flush: %d", it->flush);
0131 it->vmm->func->flush(it->vmm, it->flush);
0132 }
0133 it->flush = NVKM_VMM_LEVELS_MAX;
0134 }
0135 }
0136
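/* A child PT dropped to zero references: unhook it from the parent PDE,
 * recursing upwards if the parent PD becomes empty too, flush the MMU,
 * and free the child's hardware and software structures.
 */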
0137 static void
0138 nvkm_vmm_unref_pdes(struct nvkm_vmm_iter *it)
0139 {
0140 const struct nvkm_vmm_desc *desc = it->desc;
0141 const int type = desc[it->lvl].type == SPT;
0142 struct nvkm_vmm_pt *pgd = it->pt[it->lvl + 1];
0143 struct nvkm_vmm_pt *pgt = it->pt[it->lvl];
0144 struct nvkm_mmu_pt *pt = pgt->pt[type];
0145 struct nvkm_vmm *vmm = it->vmm;
0146 u32 pdei = it->pte[it->lvl + 1];
0147
0148
0149 it->lvl++;
0150 if (--pgd->refs[0]) {
0151 const struct nvkm_vmm_desc_func *func = desc[it->lvl].func;
0152
0153 TRA(it, "PDE unmap %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
0154 pgt->pt[type] = NULL;
0155 if (!pgt->refs[!type]) {
0156
0157 if (pgd->pt[0]) {
0158 if (pgt->sparse) {
0159 func->sparse(vmm, pgd->pt[0], pdei, 1);
0160 pgd->pde[pdei] = NVKM_VMM_PDE_SPARSE;
0161 } else {
0162 func->unmap(vmm, pgd->pt[0], pdei, 1);
0163 pgd->pde[pdei] = NULL;
0164 }
0165 } else {
0166
0167
0168
0169
0170 func->pde(vmm, pgd, pdei);
0171 pgd->pde[pdei] = NULL;
0172 }
0173 } else {
0174
0175
0176
0177 func->pde(vmm, pgd, pdei);
0178 }
0179
0180
0181 nvkm_vmm_flush_mark(it);
0182 nvkm_vmm_flush(it);
0183 } else {
0184
0185 nvkm_vmm_unref_pdes(it);
0186 }
0187
0188
0189 TRA(it, "PDE free %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
0190 nvkm_mmu_ptc_put(vmm->mmu, vmm->bootstrapped, &pt);
0191 if (!pgt->refs[!type])
0192 nvkm_vmm_pt_del(&pgt);
0193 it->lvl--;
0194 }
0195
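/* Drop small-page PTE counts tracked against the covering large-page
 * PTEs.  LPTEs whose last SPTE disappears are flipped back from UNMAPPED
 * to SPARSE, or to INVALID where the backend supports it, so the MMU need
 * not fetch the now-empty small-page entries.
 */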
0196 static void
0197 nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
0198 const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
0199 {
0200 const struct nvkm_vmm_desc *pair = it->page[-1].desc;
0201 const u32 sptb = desc->bits - pair->bits;
0202 const u32 sptn = 1 << sptb;
0203 struct nvkm_vmm *vmm = it->vmm;
0204 u32 spti = ptei & (sptn - 1), lpti, pteb;
0205
0206
0207
0208
0209 for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
0210 const u32 pten = min(sptn - spti, ptes);
0211 pgt->pte[lpti] -= pten;
0212 ptes -= pten;
0213 }
0214
0215
0216 if (!pgt->refs[0])
0217 return;
0218
0219 for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
0220
0221 if (pgt->pte[pteb] & NVKM_VMM_PTE_SPTES) {
0222 for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
0223 if (!(pgt->pte[ptei] & NVKM_VMM_PTE_SPTES))
0224 break;
0225 }
0226 continue;
0227 }
0234
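/* No valid SPTEs remain under these LPTEs, so they take back control of
 * their address range: clear the VALID tracking bit and count how many
 * need their hardware state updated.
 */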
0235 pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
0236 for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
0237 if (pgt->pte[ptei] & NVKM_VMM_PTE_SPTES)
0238 break;
0239 pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
0240 }
0241
0242 if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
0243 TRA(it, "LPTE %05x: U -> S %d PTEs", pteb, ptes);
0244 pair->func->sparse(vmm, pgt->pt[0], pteb, ptes);
0245 } else
0246 if (pair->func->invalid) {
0247
0248
0249
0250
0251 TRA(it, "LPTE %05x: U -> I %d PTEs", pteb, ptes);
0252 pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);
0253 }
0254 }
0255 }
0256
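/* Drop references on a range of PTEs, clearing and unmapping PFN entries
 * first when requested.  If the table becomes empty it is torn down via
 * nvkm_vmm_unref_pdes(), and false is returned so the walker skips any
 * further writes to it.
 */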
0257 static bool
0258 nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
0259 {
0260 const struct nvkm_vmm_desc *desc = it->desc;
0261 const int type = desc->type == SPT;
0262 struct nvkm_vmm_pt *pgt = it->pt[0];
0263 bool dma;
0264
0265 if (pfn) {
0266
0267 dma = desc->func->pfn_clear(it->vmm, pgt->pt[type], ptei, ptes);
0268 if (dma) {
0269
0270 nvkm_vmm_flush_mark(it);
0271 nvkm_vmm_flush(it);
0272 desc->func->pfn_unmap(it->vmm, pgt->pt[type], ptei, ptes);
0273 }
0274 }
0275
0276
0277 pgt->refs[type] -= ptes;
0278
0279
0280 if (desc->type == SPT && (pgt->refs[0] || pgt->refs[1]))
0281 nvkm_vmm_unref_sptes(it, pgt, desc, ptei, ptes);
0282
0283
0284 if (!pgt->refs[type]) {
0285 it->lvl++;
0286 TRA(it, "%s empty", nvkm_vmm_desc_type(desc));
0287 it->lvl--;
0288 nvkm_vmm_unref_pdes(it);
0289 return false;
0290 }
0291
0292 return true;
0293 }
0294
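/* Account new small-page PTEs against the covering large-page PTEs.  Any
 * LPTE gaining its first SPTEs is moved out of the SPARSE/INVALID state
 * to UNMAPPED, so the MMU will consult the small-page table instead.
 */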
0295 static void
0296 nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
0297 const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
0298 {
0299 const struct nvkm_vmm_desc *pair = it->page[-1].desc;
0300 const u32 sptb = desc->bits - pair->bits;
0301 const u32 sptn = 1 << sptb;
0302 struct nvkm_vmm *vmm = it->vmm;
0303 u32 spti = ptei & (sptn - 1), lpti, pteb;
0304
0305
0306
0307
0308 for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
0309 const u32 pten = min(sptn - spti, ptes);
0310 pgt->pte[lpti] += pten;
0311 ptes -= pten;
0312 }
0313
0314
0315 if (!pgt->refs[0])
0316 return;
0317
0318 for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
0319
0320 if (pgt->pte[pteb] & NVKM_VMM_PTE_VALID) {
0321 for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
0322 if (!(pgt->pte[ptei] & NVKM_VMM_PTE_VALID))
0323 break;
0324 }
0325 continue;
0326 }
0333
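/* These LPTEs are gaining their first valid SPTEs: flag them VALID in the
 * tracking array and count how many need their hardware state updated.
 */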
0334 pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
0335 for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
0336 if (pgt->pte[ptei] & NVKM_VMM_PTE_VALID)
0337 break;
0338 pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
0339 }
0340
0341 if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
0342 const u32 spti = pteb * sptn;
0343 const u32 sptc = ptes * sptn;
0344
0345
0346
0347 TRA(it, "SPTE %05x: U -> S %d PTEs", spti, sptc);
0348 desc->func->sparse(vmm, pgt->pt[1], spti, sptc);
0349
0350 TRA(it, "LPTE %05x: S -> U %d PTEs", pteb, ptes);
0351 pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
0352 } else
0353 if (pair->func->invalid) {
0354
0355
0356
0357 TRA(it, "LPTE %05x: I -> U %d PTEs", pteb, ptes);
0358 pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
0359 }
0360 }
0361 }
0362
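/* Take references on a range of PTEs; small-page references are also
 * accounted against the covering large-page table.
 */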
0363 static bool
0364 nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
0365 {
0366 const struct nvkm_vmm_desc *desc = it->desc;
0367 const int type = desc->type == SPT;
0368 struct nvkm_vmm_pt *pgt = it->pt[0];
0369
0370
0371 pgt->refs[type] += ptes;
0372
0373
0374 if (desc->type == SPT)
0375 nvkm_vmm_ref_sptes(it, pgt, desc, ptei, ptes);
0376
0377 return true;
0378 }
0379
0380 static void
0381 nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc,
0382 struct nvkm_vmm_pt *pgt, u32 ptei, u32 ptes)
0383 {
0384 if (desc->type == PGD) {
0385 while (ptes--)
0386 pgt->pde[ptei++] = NVKM_VMM_PDE_SPARSE;
0387 } else
0388 if (desc->type == LPT) {
0389 memset(&pgt->pte[ptei], NVKM_VMM_PTE_SPARSE, ptes);
0390 }
0391 }
0392
0393 static bool
0394 nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
0395 {
0396 struct nvkm_vmm_pt *pt = it->pt[0];
0397 if (it->desc->type == PGD)
0398 memset(&pt->pde[ptei], 0x00, sizeof(pt->pde[0]) * ptes);
0399 else
0400 if (it->desc->type == LPT)
0401 memset(&pt->pte[ptei], 0x00, sizeof(pt->pte[0]) * ptes);
0402 return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes);
0403 }
0404
0405 static bool
0406 nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
0407 {
0408 nvkm_vmm_sparse_ptes(it->desc, it->pt[0], ptei, ptes);
0409 return nvkm_vmm_ref_ptes(it, pfn, ptei, ptes);
0410 }
0411
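/* Allocate the hardware page table backing a software PT node and point
 * the parent PDE at it.  Unless the backend allows a simple zero-fill,
 * fresh entries are initialised to SPARSE or INVALID, taking care not to
 * clobber LPTEs that overlap already-valid SPTEs.
 */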
0412 static bool
0413 nvkm_vmm_ref_hwpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
0414 {
0415 const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
0416 const int type = desc->type == SPT;
0417 struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
0418 const bool zero = !pgt->sparse && !desc->func->invalid;
0419 struct nvkm_vmm *vmm = it->vmm;
0420 struct nvkm_mmu *mmu = vmm->mmu;
0421 struct nvkm_mmu_pt *pt;
0422 u32 pten = 1 << desc->bits;
0423 u32 pteb, ptei, ptes;
0424 u32 size = desc->size * pten;
0425
0426 pgd->refs[0]++;
0427
0428 pgt->pt[type] = nvkm_mmu_ptc_get(mmu, size, desc->align, zero);
0429 if (!pgt->pt[type]) {
0430 it->lvl--;
0431 nvkm_vmm_unref_pdes(it);
0432 return false;
0433 }
0434
0435 if (zero)
0436 goto done;
0437
0438 pt = pgt->pt[type];
0439
0440 if (desc->type == LPT && pgt->refs[1]) {
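/* A small-page PT already covers this range, so LPTEs that overlap valid
 * SPTEs must be written as UNMAPPED rather than SPARSE/INVALID, otherwise
 * the MMU may never look at the small-page entries.
 */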
0447 for (ptei = pteb = 0; ptei < pten; pteb = ptei) {
0448 bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
0449 for (ptes = 1, ptei++; ptei < pten; ptes++, ptei++) {
0450 bool next = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
0451 if (spte != next)
0452 break;
0453 }
0454
0455 if (!spte) {
0456 if (pgt->sparse)
0457 desc->func->sparse(vmm, pt, pteb, ptes);
0458 else
0459 desc->func->invalid(vmm, pt, pteb, ptes);
0460 memset(&pgt->pte[pteb], 0x00, ptes);
0461 } else {
0462 desc->func->unmap(vmm, pt, pteb, ptes);
0463 while (ptes--)
0464 pgt->pte[pteb++] |= NVKM_VMM_PTE_VALID;
0465 }
0466 }
0467 } else {
0468 if (pgt->sparse) {
0469 nvkm_vmm_sparse_ptes(desc, pgt, 0, pten);
0470 desc->func->sparse(vmm, pt, 0, pten);
0471 } else {
0472 desc->func->invalid(vmm, pt, 0, pten);
0473 }
0474 }
0475
0476 done:
0477 TRA(it, "PDE write %s", nvkm_vmm_desc_type(desc));
0478 it->desc[it->lvl].func->pde(it->vmm, pgd, pdei);
0479 nvkm_vmm_flush_mark(it);
0480 return true;
0481 }
0482
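/* Allocate the software PT node for a PDE that doesn't have one yet,
 * preserving its SPARSE marker.  On failure, tear the parent back down if
 * nothing else is referencing it.
 */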
0483 static bool
0484 nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
0485 {
0486 const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
0487 struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
0488
0489 pgt = nvkm_vmm_pt_new(desc, NVKM_VMM_PDE_SPARSED(pgt), it->page);
0490 if (!pgt) {
0491 if (!pgd->refs[0])
0492 nvkm_vmm_unref_pdes(it);
0493 return false;
0494 }
0495
0496 pgd->pde[pdei] = pgt;
0497 return true;
0498 }
0499
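/* Core page-table walker.  For each run of PTEs covered by the requested
 * range at this page size, descend to the leaf level (creating software
 * and hardware PTs on the way when "ref" is set), call the REF/MAP/CLR
 * callbacks, then advance.  Returns ~0ULL on success, or the failing
 * address so the caller can unwind what was already done.
 */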
0500 static inline u64
0501 nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
0502 u64 addr, u64 size, const char *name, bool ref, bool pfn,
0503 bool (*REF_PTES)(struct nvkm_vmm_iter *, bool pfn, u32, u32),
0504 nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
0505 nvkm_vmm_pxe_func CLR_PTES)
0506 {
0507 const struct nvkm_vmm_desc *desc = page->desc;
0508 struct nvkm_vmm_iter it;
0509 u64 bits = addr >> page->shift;
0510
0511 it.page = page;
0512 it.desc = desc;
0513 it.vmm = vmm;
0514 it.cnt = size >> page->shift;
0515 it.flush = NVKM_VMM_LEVELS_MAX;
0516
0517
0518 for (it.lvl = 0; desc[it.lvl].bits; it.lvl++) {
0519 it.pte[it.lvl] = bits & ((1 << desc[it.lvl].bits) - 1);
0520 bits >>= desc[it.lvl].bits;
0521 }
0522 it.max = --it.lvl;
0523 it.pt[it.max] = vmm->pd;
0524
0525 it.lvl = 0;
0526 TRA(&it, "%s: %016llx %016llx %d %lld PTEs", name,
0527 addr, size, page->shift, it.cnt);
0528 it.lvl = it.max;
0529
0530
0531 while (it.cnt) {
0532 struct nvkm_vmm_pt *pgt = it.pt[it.lvl];
0533 const int type = desc->type == SPT;
0534 const u32 pten = 1 << desc->bits;
0535 const u32 ptei = it.pte[0];
0536 const u32 ptes = min_t(u64, it.cnt, pten - ptei);
0537
0538
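/* Descend from the current position to the leaf level, creating software
 * and hardware page tables on the way down when referencing.
 */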
0539 for (; it.lvl; it.lvl--) {
0540 const u32 pdei = it.pte[it.lvl];
0541 struct nvkm_vmm_pt *pgd = pgt;
0542
0543
0544 if (ref && NVKM_VMM_PDE_INVALID(pgd->pde[pdei])) {
0545 if (!nvkm_vmm_ref_swpt(&it, pgd, pdei))
0546 goto fail;
0547 }
0548 it.pt[it.lvl - 1] = pgt = pgd->pde[pdei];
0555
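/* Hardware PT for this level doesn't exist yet (or isn't referenced for
 * this PTE type): allocate it and write the parent PDE.
 */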
0556 if (ref && !pgt->refs[desc[it.lvl - 1].type == SPT]) {
0557 if (!nvkm_vmm_ref_hwpt(&it, pgd, pdei))
0558 goto fail;
0559 }
0560 }
0561
0562
0563 if (!REF_PTES || REF_PTES(&it, pfn, ptei, ptes)) {
0564 struct nvkm_mmu_pt *pt = pgt->pt[type];
0565 if (MAP_PTES || CLR_PTES) {
0566 if (MAP_PTES)
0567 MAP_PTES(vmm, pt, ptei, ptes, map);
0568 else
0569 CLR_PTES(vmm, pt, ptei, ptes);
0570 nvkm_vmm_flush_mark(&it);
0571 }
0572 }
0573
0574
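/* Advance past the PTEs just processed, carrying the per-level indices
 * whenever a page-table boundary is crossed.
 */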
0575 it.pte[it.lvl] += ptes;
0576 it.cnt -= ptes;
0577 if (it.cnt) {
0578 while (it.pte[it.lvl] == (1 << desc[it.lvl].bits)) {
0579 it.pte[it.lvl++] = 0;
0580 it.pte[it.lvl]++;
0581 }
0582 }
0583 }
0584
0585 nvkm_vmm_flush(&it);
0586 return ~0ULL;
0587
0588 fail:
0589
0590
0591
0592 addr = it.pte[it.max--];
0593 do {
0594 addr = addr << desc[it.max].bits;
0595 addr |= it.pte[it.max];
0596 } while (it.max--);
0597
0598 return addr << page->shift;
0599 }
0600
0601 static void
0602 nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
0603 u64 addr, u64 size)
0604 {
0605 nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false,
0606 nvkm_vmm_sparse_unref_ptes, NULL, NULL,
0607 page->desc->func->invalid ?
0608 page->desc->func->invalid : page->desc->func->unmap);
0609 }
0610
0611 static int
0612 nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
0613 u64 addr, u64 size)
0614 {
0615 if ((page->type & NVKM_VMM_PAGE_SPARSE)) {
0616 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
0617 true, false, nvkm_vmm_sparse_ref_ptes,
0618 NULL, NULL, page->desc->func->sparse);
0619 if (fail != ~0ULL) {
0620 if ((size = fail - addr))
0621 nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
0622 return -ENOMEM;
0623 }
0624 return 0;
0625 }
0626 return -EINVAL;
0627 }
0628
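/* Set or clear sparse state across a range, carving it into blocks that
 * use the largest page size compatible with the current alignment and
 * remaining size.
 */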
0629 static int
0630 nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
0631 {
0632 const struct nvkm_vmm_page *page = vmm->func->page;
0633 int m = 0, i;
0634 u64 start = addr;
0635 u64 block;
0636
0637 while (size) {
0638
0639 while (size < (1ULL << page[m].shift))
0640 m++;
0641 i = m;
0642
0643
0644 while (!IS_ALIGNED(addr, 1ULL << page[i].shift))
0645 i++;
0646
0647
0648 if (i != m) {
0649
0650 u64 next = 1ULL << page[i - 1].shift;
0651 u64 part = ALIGN(addr, next) - addr;
0652 if (size - part >= next)
0653 block = (part >> page[i].shift) << page[i].shift;
0654 else
0655 block = (size >> page[i].shift) << page[i].shift;
0656 } else {
0657 block = (size >> page[i].shift) << page[i].shift;
0658 }
0659
0660
0661 if (ref) {
0662 int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block);
0663 if (ret) {
0664 if ((size = addr - start))
0665 nvkm_vmm_ptes_sparse(vmm, start, size, false);
0666 return ret;
0667 }
0668 } else {
0669 nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block);
0670 }
0671
0672 size -= block;
0673 addr += block;
0674 }
0675
0676 return 0;
0677 }
0678
0679 static void
0680 nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
0681 u64 addr, u64 size, bool sparse, bool pfn)
0682 {
0683 const struct nvkm_vmm_desc_func *func = page->desc->func;
0684 nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
0685 false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
0686 sparse ? func->sparse : func->invalid ? func->invalid :
0687 func->unmap);
0688 }
0689
0690 static int
0691 nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
0692 u64 addr, u64 size, struct nvkm_vmm_map *map,
0693 nvkm_vmm_pte_func func)
0694 {
0695 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
0696 false, nvkm_vmm_ref_ptes, func, map, NULL);
0697 if (fail != ~0ULL) {
0698 if ((size = fail - addr))
0699 nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
0700 return -ENOMEM;
0701 }
0702 return 0;
0703 }
0704
0705 static void
0706 nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
0707 u64 addr, u64 size, bool sparse, bool pfn)
0708 {
0709 const struct nvkm_vmm_desc_func *func = page->desc->func;
0710 nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
0711 NULL, NULL, NULL,
0712 sparse ? func->sparse : func->invalid ? func->invalid :
0713 func->unmap);
0714 }
0715
0716 static void
0717 nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
0718 u64 addr, u64 size, struct nvkm_vmm_map *map,
0719 nvkm_vmm_pte_func func)
0720 {
0721 nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
0722 NULL, func, map, NULL);
0723 }
0724
0725 static void
0726 nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
0727 u64 addr, u64 size)
0728 {
0729 nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
0730 nvkm_vmm_unref_ptes, NULL, NULL, NULL);
0731 }
0732
0733 static int
0734 nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
0735 u64 addr, u64 size)
0736 {
0737 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
0738 nvkm_vmm_ref_ptes, NULL, NULL, NULL);
0739 if (fail != ~0ULL) {
0740 if (fail != addr)
0741 nvkm_vmm_ptes_put(vmm, page, addr, fail - addr);
0742 return -ENOMEM;
0743 }
0744 return 0;
0745 }
0746
0747 static inline struct nvkm_vma *
0748 nvkm_vma_new(u64 addr, u64 size)
0749 {
0750 struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
0751 if (vma) {
0752 vma->addr = addr;
0753 vma->size = size;
0754 vma->page = NVKM_VMA_PAGE_NONE;
0755 vma->refd = NVKM_VMA_PAGE_NONE;
0756 }
0757 return vma;
0758 }
0759
0760 struct nvkm_vma *
0761 nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
0762 {
0763 struct nvkm_vma *new;
0764
0765 BUG_ON(vma->size == tail);
0766
0767 if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))
0768 return NULL;
0769 vma->size -= tail;
0770
0771 new->mapref = vma->mapref;
0772 new->sparse = vma->sparse;
0773 new->page = vma->page;
0774 new->refd = vma->refd;
0775 new->used = vma->used;
0776 new->part = vma->part;
0777 new->busy = vma->busy;
0778 new->mapped = vma->mapped;
0779 list_add(&new->head, &vma->head);
0780 return new;
0781 }
0782
0783 static inline void
0784 nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
0785 {
0786 rb_erase(&vma->tree, &vmm->free);
0787 }
0788
0789 static inline void
0790 nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
0791 {
0792 nvkm_vmm_free_remove(vmm, vma);
0793 list_del(&vma->head);
0794 kfree(vma);
0795 }
0796
0797 static void
0798 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
0799 {
0800 struct rb_node **ptr = &vmm->free.rb_node;
0801 struct rb_node *parent = NULL;
0802
0803 while (*ptr) {
0804 struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
0805 parent = *ptr;
0806 if (vma->size < this->size)
0807 ptr = &parent->rb_left;
0808 else
0809 if (vma->size > this->size)
0810 ptr = &parent->rb_right;
0811 else
0812 if (vma->addr < this->addr)
0813 ptr = &parent->rb_left;
0814 else
0815 if (vma->addr > this->addr)
0816 ptr = &parent->rb_right;
0817 else
0818 BUG();
0819 }
0820
0821 rb_link_node(&vma->tree, parent, ptr);
0822 rb_insert_color(&vma->tree, &vmm->free);
0823 }
0824
0825 static inline void
0826 nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
0827 {
0828 rb_erase(&vma->tree, &vmm->root);
0829 }
0830
0831 static inline void
0832 nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
0833 {
0834 nvkm_vmm_node_remove(vmm, vma);
0835 list_del(&vma->head);
0836 kfree(vma);
0837 }
0838
0839 static void
0840 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
0841 {
0842 struct rb_node **ptr = &vmm->root.rb_node;
0843 struct rb_node *parent = NULL;
0844
0845 while (*ptr) {
0846 struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
0847 parent = *ptr;
0848 if (vma->addr < this->addr)
0849 ptr = &parent->rb_left;
0850 else
0851 if (vma->addr > this->addr)
0852 ptr = &parent->rb_right;
0853 else
0854 BUG();
0855 }
0856
0857 rb_link_node(&vma->tree, parent, ptr);
0858 rb_insert_color(&vma->tree, &vmm->root);
0859 }
0860
0861 struct nvkm_vma *
0862 nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
0863 {
0864 struct rb_node *node = vmm->root.rb_node;
0865 while (node) {
0866 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
0867 if (addr < vma->addr)
0868 node = node->rb_left;
0869 else
0870 if (addr >= vma->addr + vma->size)
0871 node = node->rb_right;
0872 else
0873 return vma;
0874 }
0875 return NULL;
0876 }
0877
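/* Previous/next VMA in address order, or NULL at either end of the list;
 * relies on a "vmm" variable being in scope, e.g. prev = node(vma, prev).
 */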
0878 #define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL : \
0879 list_entry((root)->head.dir, struct nvkm_vma, head))
0880
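/* Merge a region with its neighbours where possible, keeping the in-use
 * tree and size accounting consistent; returns the VMA that now covers
 * the affected range.
 */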
0881 static struct nvkm_vma *
0882 nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
0883 struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
0884 {
0885 if (next) {
0886 if (vma->size == size) {
0887 vma->size += next->size;
0888 nvkm_vmm_node_delete(vmm, next);
0889 if (prev) {
0890 prev->size += vma->size;
0891 nvkm_vmm_node_delete(vmm, vma);
0892 return prev;
0893 }
0894 return vma;
0895 }
0896 BUG_ON(prev);
0897
0898 nvkm_vmm_node_remove(vmm, next);
0899 vma->size -= size;
0900 next->addr -= size;
0901 next->size += size;
0902 nvkm_vmm_node_insert(vmm, next);
0903 return next;
0904 }
0905
0906 if (prev) {
0907 if (vma->size != size) {
0908 nvkm_vmm_node_remove(vmm, vma);
0909 prev->size += size;
0910 vma->addr += size;
0911 vma->size -= size;
0912 nvkm_vmm_node_insert(vmm, vma);
0913 } else {
0914 prev->size += vma->size;
0915 nvkm_vmm_node_delete(vmm, vma);
0916 }
0917 return prev;
0918 }
0919
0920 return vma;
0921 }
0922
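/* Split a VMA so that [addr, addr + size) becomes a node of its own,
 * inserting head/tail remainders into the in-use tree.  Returns NULL
 * (after undoing any head split) if allocation fails.
 */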
0923 struct nvkm_vma *
0924 nvkm_vmm_node_split(struct nvkm_vmm *vmm,
0925 struct nvkm_vma *vma, u64 addr, u64 size)
0926 {
0927 struct nvkm_vma *prev = NULL;
0928
0929 if (vma->addr != addr) {
0930 prev = vma;
0931 if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
0932 return NULL;
0933 vma->part = true;
0934 nvkm_vmm_node_insert(vmm, vma);
0935 }
0936
0937 if (vma->size != size) {
0938 struct nvkm_vma *tmp;
0939 if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
0940 nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
0941 return NULL;
0942 }
0943 tmp->part = true;
0944 nvkm_vmm_node_insert(vmm, tmp);
0945 }
0946
0947 return vma;
0948 }
0949
0950 static void
0951 nvkm_vma_dump(struct nvkm_vma *vma)
0952 {
0953 printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c %p\n",
0954 vma->addr, (u64)vma->size,
0955 vma->used ? '-' : 'F',
0956 vma->mapref ? 'R' : '-',
0957 vma->sparse ? 'S' : '-',
0958 vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
0959 vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
0960 vma->part ? 'P' : '-',
0961 vma->busy ? 'B' : '-',
0962 vma->mapped ? 'M' : '-',
0963 vma->memory);
0964 }
0965
0966 static void
0967 nvkm_vmm_dump(struct nvkm_vmm *vmm)
0968 {
0969 struct nvkm_vma *vma;
0970 list_for_each_entry(vma, &vmm->list, head) {
0971 nvkm_vma_dump(vma);
0972 }
0973 }
0974
0975 static void
0976 nvkm_vmm_dtor(struct nvkm_vmm *vmm)
0977 {
0978 struct nvkm_vma *vma;
0979 struct rb_node *node;
0980
0981 if (0)
0982 nvkm_vmm_dump(vmm);
0983
0984 while ((node = rb_first(&vmm->root))) {
0985 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
0986 nvkm_vmm_put(vmm, &vma);
0987 }
0988
0989 if (vmm->bootstrapped) {
0990 const struct nvkm_vmm_page *page = vmm->func->page;
0991 const u64 limit = vmm->limit - vmm->start;
0992
0993 while (page[1].shift)
0994 page++;
0995
0996 nvkm_mmu_ptc_dump(vmm->mmu);
0997 nvkm_vmm_ptes_put(vmm, page, vmm->start, limit);
0998 }
0999
1000 vma = list_first_entry(&vmm->list, typeof(*vma), head);
1001 list_del(&vma->head);
1002 kfree(vma);
1003 WARN_ON(!list_empty(&vmm->list));
1004
1005 if (vmm->nullp) {
1006 dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024,
1007 vmm->nullp, vmm->null);
1008 }
1009
1010 if (vmm->pd) {
1011 nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]);
1012 nvkm_vmm_pt_del(&vmm->pd);
1013 }
1014 }
1015
1016 static int
1017 nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
1018 {
1019 struct nvkm_vma *vma;
1020 if (!(vma = nvkm_vma_new(addr, size)))
1021 return -ENOMEM;
1022 vma->mapref = true;
1023 vma->sparse = false;
1024 vma->used = true;
1025 nvkm_vmm_node_insert(vmm, vma);
1026 list_add_tail(&vma->head, &vmm->list);
1027 return 0;
1028 }
1029
1030 static int
1031 nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
1032 u32 pd_header, bool managed, u64 addr, u64 size,
1033 struct lock_class_key *key, const char *name,
1034 struct nvkm_vmm *vmm)
1035 {
1036 static struct lock_class_key _key;
1037 const struct nvkm_vmm_page *page = func->page;
1038 const struct nvkm_vmm_desc *desc;
1039 struct nvkm_vma *vma;
1040 int levels, bits = 0, ret;
1041
1042 vmm->func = func;
1043 vmm->mmu = mmu;
1044 vmm->name = name;
1045 vmm->debug = mmu->subdev.debug;
1046 kref_init(&vmm->kref);
1047
1048 __mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key);
1049
1050
1051
1052
1053 while (page[1].shift)
1054 page++;
1055
1056
1057
1058
1059
1060 for (levels = 0, desc = page->desc; desc->bits; desc++, levels++)
1061 bits += desc->bits;
1062 bits += page->shift;
1063 desc--;
1064
1065 if (WARN_ON(levels > NVKM_VMM_LEVELS_MAX))
1066 return -EINVAL;
1067
1068
1069 vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);
1070 if (!vmm->pd)
1071 return -ENOMEM;
1072 vmm->pd->refs[0] = 1;
1073 INIT_LIST_HEAD(&vmm->join);
1074
1075
1076
1077
1078 if (desc->size) {
1079 const u32 size = pd_header + desc->size * (1 << desc->bits);
1080 vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
1081 if (!vmm->pd->pt[0])
1082 return -ENOMEM;
1083 }
1084
1085
1086 INIT_LIST_HEAD(&vmm->list);
1087 vmm->free = RB_ROOT;
1088 vmm->root = RB_ROOT;
1089
1090 if (managed) {
1091
1092
1093
1094
1095 vmm->start = 0;
1096 vmm->limit = 1ULL << bits;
1097 if (addr + size < addr || addr + size > vmm->limit)
1098 return -EINVAL;
1099
1100
1101 if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
1102 return ret;
1103
1104
1105 if (size) {
1106 if (!(vma = nvkm_vma_new(addr, size)))
1107 return -ENOMEM;
1108 nvkm_vmm_free_insert(vmm, vma);
1109 list_add_tail(&vma->head, &vmm->list);
1110 }
1111
1112
1113 addr = addr + size;
1114 size = vmm->limit - addr;
1115 if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
1116 return ret;
1117 } else {
1118
1119
1120
1121 vmm->start = addr;
1122 vmm->limit = size ? (addr + size) : (1ULL << bits);
1123 if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
1124 return -EINVAL;
1125
1126 if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
1127 return -ENOMEM;
1128
1129 nvkm_vmm_free_insert(vmm, vma);
1130 list_add(&vma->head, &vmm->list);
1131 }
1132
1133 return 0;
1134 }
1135
1136 int
1137 nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
1138 u32 hdr, bool managed, u64 addr, u64 size,
1139 struct lock_class_key *key, const char *name,
1140 struct nvkm_vmm **pvmm)
1141 {
1142 if (!(*pvmm = kzalloc(sizeof(**pvmm), GFP_KERNEL)))
1143 return -ENOMEM;
1144 return nvkm_vmm_ctor(func, mmu, hdr, managed, addr, size, key, name, *pvmm);
1145 }
1146
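/* For a PFN (un)map, either merge the affected range into neighbouring
 * fragments with a compatible mapped state, or split it out of the VMA
 * containing it.
 */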
1147 static struct nvkm_vma *
1148 nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1149 u64 addr, u64 size, u8 page, bool map)
1150 {
1151 struct nvkm_vma *prev = NULL;
1152 struct nvkm_vma *next = NULL;
1153
1154 if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {
1155 if (prev->memory || prev->mapped != map)
1156 prev = NULL;
1157 }
1158
1159 if (vma->addr + vma->size == addr + size && (next = node(vma, next))) {
1160 if (!next->part ||
1161 next->memory || next->mapped != map)
1162 next = NULL;
1163 }
1164
1165 if (prev || next)
1166 return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
1167 return nvkm_vmm_node_split(vmm, vma, addr, size);
1168 }
1169
1170 int
1171 nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
1172 {
1173 struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
1174 struct nvkm_vma *next;
1175 u64 limit = addr + size;
1176 u64 start = addr;
1177
1178 if (!vma)
1179 return -EINVAL;
1180
1181 do {
1182 if (!vma->mapped || vma->memory)
1183 continue;
1184
1185 size = min(limit - start, vma->size - (start - vma->addr));
1186
1187 nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
1188 start, size, false, true);
1189
1190 next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
1191 if (!WARN_ON(!next)) {
1192 vma = next;
1193 vma->refd = NVKM_VMA_PAGE_NONE;
1194 vma->mapped = false;
1195 }
1196 } while ((vma = node(vma, next)) && (start = vma->addr) < limit);
1197
1198 return 0;
1199 }
1205
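/* Map an array of PFN entries into [addr, addr + size) at the requested
 * page shift.  The range is processed in runs of consecutively valid (or
 * invalid) entries: valid runs are mapped (referencing PTEs as needed),
 * invalid runs are unmapped, and entries in a run that fails are handed
 * back to the caller as NVKM_VMM_PFN_NONE.
 */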
1206 int
1207 nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
1208 {
1209 const struct nvkm_vmm_page *page = vmm->func->page;
1210 struct nvkm_vma *vma, *tmp;
1211 u64 limit = addr + size;
1212 u64 start = addr;
1213 int pm = size >> shift;
1214 int pi = 0;
1215
1216
1217
1218
1219 while (page->shift && (page->shift != shift ||
1220 page->desc->func->pfn == NULL))
1221 page++;
1222
1223 if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||
1224 !IS_ALIGNED(size, 1ULL << shift) ||
1225 addr + size < addr || addr + size > vmm->limit) {
1226 VMM_DEBUG(vmm, "paged map %d %d %016llx %016llx\n",
1227 shift, page->shift, addr, size);
1228 return -EINVAL;
1229 }
1230
1231 if (!(vma = nvkm_vmm_node_search(vmm, addr)))
1232 return -ENOENT;
1233
1234 do {
1235 bool map = !!(pfn[pi] & NVKM_VMM_PFN_V);
1236 bool mapped = vma->mapped;
1237 u64 size = limit - start;
1238 u64 addr = start;
1239 int pn, ret = 0;
1240
1241
1242
1243
1244 for (pn = 0; pi + pn < pm; pn++) {
1245 if (map != !!(pfn[pi + pn] & NVKM_VMM_PFN_V))
1246 break;
1247 }
1248 size = min_t(u64, size, pn << page->shift);
1249 size = min_t(u64, size, vma->size + vma->addr - addr);
1250
1251
1252
1253
1254 if (!vma->mapref || vma->memory) {
1255 ret = -EINVAL;
1256 goto next;
1257 }
1269
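/* The valid/invalid state of this run differs from the VMA it lands in,
 * so split it out (or merge it into compatible neighbours) and update the
 * new node's mapped/refd state.
 */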
1270 if (map != mapped) {
1271 tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
1272 page -
1273 vmm->func->page, map);
1274 if (WARN_ON(!tmp)) {
1275 ret = -ENOMEM;
1276 goto next;
1277 }
1278
1279 if ((tmp->mapped = map))
1280 tmp->refd = page - vmm->func->page;
1281 else
1282 tmp->refd = NVKM_VMA_PAGE_NONE;
1283 vma = tmp;
1284 }
1285
1286
1287 if (map) {
1288 struct nvkm_vmm_map args;
1289 args.page = page;
1290 args.pfn = &pfn[pi];
1291
1292 if (!mapped) {
1293 ret = nvkm_vmm_ptes_get_map(vmm, page, addr,
1294 size, &args, page->
1295 desc->func->pfn);
1296 } else {
1297 nvkm_vmm_ptes_map(vmm, page, addr, size, &args,
1298 page->desc->func->pfn);
1299 }
1300 } else {
1301 if (mapped) {
1302 nvkm_vmm_ptes_unmap_put(vmm, page, addr, size,
1303 false, true);
1304 }
1305 }
1306
1307 next:
1308
1309 if (vma->addr + vma->size == addr + size)
1310 vma = node(vma, next);
1311 start += size;
1312
1313 if (ret) {
1314
1315
1316
1317 while (size) {
1318 pfn[pi++] = NVKM_VMM_PFN_NONE;
1319 size -= 1 << page->shift;
1320 }
1321 } else {
1322 pi += size >> page->shift;
1323 }
1324 } while (vma && start < limit);
1325
1326 return 0;
1327 }
1328
1329 void
1330 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1331 {
1332 struct nvkm_vma *prev = NULL;
1333 struct nvkm_vma *next;
1334
1335 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1336 nvkm_memory_unref(&vma->memory);
1337 vma->mapped = false;
1338
1339 if (vma->part && (prev = node(vma, prev)) && prev->mapped)
1340 prev = NULL;
1341 if ((next = node(vma, next)) && (!next->part || next->mapped))
1342 next = NULL;
1343 nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
1344 }
1345
1346 void
1347 nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
1348 {
1349 const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
1350
1351 if (vma->mapref) {
1352 nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1353 vma->refd = NVKM_VMA_PAGE_NONE;
1354 } else {
1355 nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1356 }
1357
1358 nvkm_vmm_unmap_region(vmm, vma);
1359 }
1360
1361 void
1362 nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1363 {
1364 if (vma->memory) {
1365 mutex_lock(&vmm->mutex);
1366 nvkm_vmm_unmap_locked(vmm, vma, false);
1367 mutex_unlock(&vmm->mutex);
1368 }
1369 }
1370
1371 static int
1372 nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1373 void *argv, u32 argc, struct nvkm_vmm_map *map)
1374 {
1375 switch (nvkm_memory_target(map->memory)) {
1376 case NVKM_MEM_TARGET_VRAM:
1377 if (!(map->page->type & NVKM_VMM_PAGE_VRAM)) {
1378 VMM_DEBUG(vmm, "%d !VRAM", map->page->shift);
1379 return -EINVAL;
1380 }
1381 break;
1382 case NVKM_MEM_TARGET_HOST:
1383 case NVKM_MEM_TARGET_NCOH:
1384 if (!(map->page->type & NVKM_VMM_PAGE_HOST)) {
1385 VMM_DEBUG(vmm, "%d !HOST", map->page->shift);
1386 return -EINVAL;
1387 }
1388 break;
1389 default:
1390 WARN_ON(1);
1391 return -ENOSYS;
1392 }
1393
1394 if (!IS_ALIGNED( vma->addr, 1ULL << map->page->shift) ||
1395 !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) ||
1396 !IS_ALIGNED( map->offset, 1ULL << map->page->shift) ||
1397 nvkm_memory_page(map->memory) < map->page->shift) {
1398 VMM_DEBUG(vmm, "alignment %016llx %016llx %016llx %d %d",
1399 vma->addr, (u64)vma->size, map->offset, map->page->shift,
1400 nvkm_memory_page(map->memory));
1401 return -EINVAL;
1402 }
1403
1404 return vmm->func->valid(vmm, argv, argc, map);
1405 }
1406
1407 static int
1408 nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1409 void *argv, u32 argc, struct nvkm_vmm_map *map)
1410 {
1411 for (map->page = vmm->func->page; map->page->shift; map->page++) {
1412 VMM_DEBUG(vmm, "trying %d", map->page->shift);
1413 if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))
1414 return 0;
1415 }
1416 return -EINVAL;
1417 }
1418
1419 static int
1420 nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1421 void *argv, u32 argc, struct nvkm_vmm_map *map)
1422 {
1423 nvkm_vmm_pte_func func;
1424 int ret;
1425
1426
1427 if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
1428 VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
1429 nvkm_memory_size(map->memory),
1430 map->offset, (u64)vma->size);
1431 return -EINVAL;
1432 }
1433
1434
1435 if (vma->page == NVKM_VMA_PAGE_NONE &&
1436 vma->refd == NVKM_VMA_PAGE_NONE) {
1437
1438 const u32 debug = vmm->debug;
1439 vmm->debug = 0;
1440 ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1441 vmm->debug = debug;
1442 if (ret) {
1443 VMM_DEBUG(vmm, "invalid at any page size");
1444 nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1445 return -EINVAL;
1446 }
1447 } else {
1448
1449 if (vma->refd != NVKM_VMA_PAGE_NONE)
1450 map->page = &vmm->func->page[vma->refd];
1451 else
1452 map->page = &vmm->func->page[vma->page];
1453
1454 ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
1455 if (ret) {
1456 VMM_DEBUG(vmm, "invalid %d\n", ret);
1457 return ret;
1458 }
1459 }
1460
1461
1462 map->off = map->offset;
1463 if (map->mem) {
1464 for (; map->off; map->mem = map->mem->next) {
1465 u64 size = (u64)map->mem->length << NVKM_RAM_MM_SHIFT;
1466 if (size > map->off)
1467 break;
1468 map->off -= size;
1469 }
1470 func = map->page->desc->func->mem;
1471 } else
1472 if (map->sgl) {
1473 for (; map->off; map->sgl = sg_next(map->sgl)) {
1474 u64 size = sg_dma_len(map->sgl);
1475 if (size > map->off)
1476 break;
1477 map->off -= size;
1478 }
1479 func = map->page->desc->func->sgl;
1480 } else {
1481 map->dma += map->offset >> PAGE_SHIFT;
1482 map->off = map->offset & PAGE_MASK;
1483 func = map->page->desc->func->dma;
1484 }
1485
1486
1487 if (vma->refd == NVKM_VMA_PAGE_NONE) {
1488 ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
1489 if (ret)
1490 return ret;
1491
1492 vma->refd = map->page - vmm->func->page;
1493 } else {
1494 nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
1495 }
1496
1497 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1498 nvkm_memory_unref(&vma->memory);
1499 vma->memory = nvkm_memory_ref(map->memory);
1500 vma->mapped = true;
1501 vma->tags = map->tags;
1502 return 0;
1503 }
1504
1505 int
1506 nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
1507 struct nvkm_vmm_map *map)
1508 {
1509 int ret;
1510 mutex_lock(&vmm->mutex);
1511 ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
1512 vma->busy = false;
1513 mutex_unlock(&vmm->mutex);
1514 return ret;
1515 }
1516
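/* Return a region to the free tree, first absorbing any unused neighbours. */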
1517 static void
1518 nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1519 {
1520 struct nvkm_vma *prev, *next;
1521
1522 if ((prev = node(vma, prev)) && !prev->used) {
1523 vma->addr = prev->addr;
1524 vma->size += prev->size;
1525 nvkm_vmm_free_delete(vmm, prev);
1526 }
1527
1528 if ((next = node(vma, next)) && !next->used) {
1529 vma->size += next->size;
1530 nvkm_vmm_free_delete(vmm, next);
1531 }
1532
1533 nvkm_vmm_free_insert(vmm, vma);
1534 }
1535
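/* Release address-space allocated by nvkm_vmm_get_locked(): unmap and/or
 * unreference its PTEs (batching adjacent fragments that share mapping
 * state), undo any sparse setup, then hand the region back to the free
 * tree.
 */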
1536 void
1537 nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1538 {
1539 const struct nvkm_vmm_page *page = vmm->func->page;
1540 struct nvkm_vma *next = vma;
1541
1542 BUG_ON(vma->part);
1543
1544 if (vma->mapref || !vma->sparse) {
1545 do {
1546 const bool mem = next->memory != NULL;
1547 const bool map = next->mapped;
1548 const u8 refd = next->refd;
1549 const u64 addr = next->addr;
1550 u64 size = next->size;
1551
1552
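/* Accumulate adjacent fragments with identical mapping state so their
 * PTEs can be released in a single pass.
 */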
1553 while ((next = node(next, next)) && next->part &&
1554 (next->mapped == map) &&
1555 (next->memory != NULL) == mem &&
1556 (next->refd == refd))
1557 size += next->size;
1558
1559 if (map) {
1560
1561
1562
1563
1564 nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
1565 size, vma->sparse,
1566 !mem);
1567 } else
1568 if (refd != NVKM_VMA_PAGE_NONE) {
1569
1570 nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
1571 }
1572 } while (next && next->part);
1573 }
1574
1575
1576
1577
1578
1579 next = vma;
1580 do {
1581 if (next->mapped)
1582 nvkm_vmm_unmap_region(vmm, next);
1583 } while ((next = node(vma, next)) && next->part);
1584
1585 if (vma->sparse && !vma->mapref) {
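/* Sparse region whose PTEs were referenced when it was allocated: release
 * the sparse state and those references together.
 */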
1594 nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
1595 } else
1596 if (vma->sparse) {
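/* Sparse region without up-front PTE references: just strip the sparse
 * state from the page tables covering it.
 */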
1605 nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false);
1606 }
1607
1608
1609 nvkm_vmm_node_remove(vmm, vma);
1610
1611
1612 vma->page = NVKM_VMA_PAGE_NONE;
1613 vma->refd = NVKM_VMA_PAGE_NONE;
1614 vma->used = false;
1615 nvkm_vmm_put_region(vmm, vma);
1616 }
1617
1618 void
1619 nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
1620 {
1621 struct nvkm_vma *vma = *pvma;
1622 if (vma) {
1623 mutex_lock(&vmm->mutex);
1624 nvkm_vmm_put_locked(vmm, vma);
1625 mutex_unlock(&vmm->mutex);
1626 *pvma = NULL;
1627 }
1628 }
1629
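/* Allocate address-space.  A page size is mandatory when PTEs are to be
 * referenced up-front (getref) or the backend restricts page size per
 * block; the free tree is searched for the smallest suitable block, which
 * is then split to the requested size and, if asked, has its PTEs
 * referenced and/or marked sparse before being inserted as in-use.
 */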
1630 int
1631 nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
1632 u8 shift, u8 align, u64 size, struct nvkm_vma **pvma)
1633 {
1634 const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE];
1635 struct rb_node *node = NULL, *temp;
1636 struct nvkm_vma *vma = NULL, *tmp;
1637 u64 addr, tail;
1638 int ret;
1639
1640 VMM_TRACE(vmm, "getref %d mapref %d sparse %d "
1641 "shift: %d align: %d size: %016llx",
1642 getref, mapref, sparse, shift, align, size);
1643
1644
1645 if (unlikely(!size || (!getref && !mapref && sparse))) {
1646 VMM_DEBUG(vmm, "args %016llx %d %d %d",
1647 size, getref, mapref, sparse);
1648 return -EINVAL;
1649 }
1656
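/* Up-front PTE allocation, and backends that restrict page size per block
 * of address-space, both require the page size to be known here.
 */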
1657 if (unlikely((getref || vmm->func->page_block) && !shift)) {
1658 VMM_DEBUG(vmm, "page size required: %d %016llx",
1659 getref, vmm->func->page_block);
1660 return -EINVAL;
1661 }
1662
1663
1664
1665
1666 if (shift) {
1667 for (page = vmm->func->page; page->shift; page++) {
1668 if (shift == page->shift)
1669 break;
1670 }
1671
1672 if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
1673 VMM_DEBUG(vmm, "page %d %016llx", shift, size);
1674 return -EINVAL;
1675 }
1676 align = max_t(u8, align, shift);
1677 } else {
1678 align = max_t(u8, align, 12);
1679 }
1680
1681
1682 temp = vmm->free.rb_node;
1683 while (temp) {
1684 struct nvkm_vma *this = rb_entry(temp, typeof(*this), tree);
1685 if (this->size < size) {
1686 temp = temp->rb_right;
1687 } else {
1688 node = temp;
1689 temp = temp->rb_left;
1690 }
1691 }
1692
1693 if (unlikely(!node))
1694 return -ENOSPC;
1695
1696
1697
1698
1699 do {
1700 struct nvkm_vma *this = rb_entry(node, typeof(*this), tree);
1701 struct nvkm_vma *prev = node(this, prev);
1702 struct nvkm_vma *next = node(this, next);
1703 const int p = page - vmm->func->page;
1704
1705 addr = this->addr;
1706 if (vmm->func->page_block && prev && prev->page != p)
1707 addr = ALIGN(addr, vmm->func->page_block);
1708 addr = ALIGN(addr, 1ULL << align);
1709
1710 tail = this->addr + this->size;
1711 if (vmm->func->page_block && next && next->page != p)
1712 tail = ALIGN_DOWN(tail, vmm->func->page_block);
1713
1714 if (addr <= tail && tail - addr >= size) {
1715 nvkm_vmm_free_remove(vmm, this);
1716 vma = this;
1717 break;
1718 }
1719 } while ((node = rb_next(node)));
1720
1721 if (unlikely(!vma))
1722 return -ENOSPC;
1723
1724
1725
1726
1727 if (addr != vma->addr) {
1728 if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) {
1729 nvkm_vmm_put_region(vmm, vma);
1730 return -ENOMEM;
1731 }
1732 nvkm_vmm_free_insert(vmm, vma);
1733 vma = tmp;
1734 }
1735
1736 if (size != vma->size) {
1737 if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
1738 nvkm_vmm_put_region(vmm, vma);
1739 return -ENOMEM;
1740 }
1741 nvkm_vmm_free_insert(vmm, tmp);
1742 }
1743
1744
1745 if (sparse && getref)
1746 ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
1747 else if (sparse)
1748 ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true);
1749 else if (getref)
1750 ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);
1751 else
1752 ret = 0;
1753 if (ret) {
1754 nvkm_vmm_put_region(vmm, vma);
1755 return ret;
1756 }
1757
1758 vma->mapref = mapref && !getref;
1759 vma->sparse = sparse;
1760 vma->page = page - vmm->func->page;
1761 vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE;
1762 vma->used = true;
1763 nvkm_vmm_node_insert(vmm, vma);
1764 *pvma = vma;
1765 return 0;
1766 }
1767
1768 int
1769 nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
1770 {
1771 int ret;
1772 mutex_lock(&vmm->mutex);
1773 ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
1774 mutex_unlock(&vmm->mutex);
1775 return ret;
1776 }
1777
1778 void
1779 nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
1780 {
1781 if (inst && vmm && vmm->func->part) {
1782 mutex_lock(&vmm->mutex);
1783 vmm->func->part(vmm, inst);
1784 mutex_unlock(&vmm->mutex);
1785 }
1786 }
1787
1788 int
1789 nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
1790 {
1791 int ret = 0;
1792 if (vmm->func->join) {
1793 mutex_lock(&vmm->mutex);
1794 ret = vmm->func->join(vmm, inst);
1795 mutex_unlock(&vmm->mutex);
1796 }
1797 return ret;
1798 }
1799
1800 static bool
1801 nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
1802 {
1803 const struct nvkm_vmm_desc *desc = it->desc;
1804 const int type = desc->type == SPT;
1805 nvkm_memory_boot(it->pt[0]->pt[type]->memory, it->vmm);
1806 return false;
1807 }
1808
1809 int
1810 nvkm_vmm_boot(struct nvkm_vmm *vmm)
1811 {
1812 const struct nvkm_vmm_page *page = vmm->func->page;
1813 const u64 limit = vmm->limit - vmm->start;
1814 int ret;
1815
1816 while (page[1].shift)
1817 page++;
1818
1819 ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit);
1820 if (ret)
1821 return ret;
1822
1823 nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false,
1824 nvkm_vmm_boot_ptes, NULL, NULL, NULL);
1825 vmm->bootstrapped = true;
1826 return 0;
1827 }
1828
1829 static void
1830 nvkm_vmm_del(struct kref *kref)
1831 {
1832 struct nvkm_vmm *vmm = container_of(kref, typeof(*vmm), kref);
1833 nvkm_vmm_dtor(vmm);
1834 kfree(vmm);
1835 }
1836
1837 void
1838 nvkm_vmm_unref(struct nvkm_vmm **pvmm)
1839 {
1840 struct nvkm_vmm *vmm = *pvmm;
1841 if (vmm) {
1842 kref_put(&vmm->kref, nvkm_vmm_del);
1843 *pvmm = NULL;
1844 }
1845 }
1846
1847 struct nvkm_vmm *
1848 nvkm_vmm_ref(struct nvkm_vmm *vmm)
1849 {
1850 if (vmm)
1851 kref_get(&vmm->kref);
1852 return vmm;
1853 }
1854
1855 int
1856 nvkm_vmm_new(struct nvkm_device *device, u64 addr, u64 size, void *argv,
1857 u32 argc, struct lock_class_key *key, const char *name,
1858 struct nvkm_vmm **pvmm)
1859 {
1860 struct nvkm_mmu *mmu = device->mmu;
1861 struct nvkm_vmm *vmm = NULL;
1862 int ret;
1863 ret = mmu->func->vmm.ctor(mmu, false, addr, size, argv, argc,
1864 key, name, &vmm);
1865 if (ret)
1866 nvkm_vmm_unref(&vmm);
1867 *pvmm = vmm;
1868 return ret;
1869 }