#include "uvmm.h"
#include "umem.h"
#include "ummu.h"

#include <core/client.h>
#include <core/memory.h>

#include <nvif/if000c.h>
#include <nvif/unpack.h>

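/* nvkm_uvmm is forward-declared so nvkm_uvmm_search() can verify that a
 * handle really names a user VMM object before dereferencing it.  The
 * helper resolves a client object handle to the wrapped nvkm_vmm, or
 * returns the lookup failure as an ERR_PTR-encoded pointer.
 */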
static const struct nvkm_object_func nvkm_uvmm;
struct nvkm_vmm *
nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
{
        struct nvkm_object *object;

        object = nvkm_object_search(client, handle, &nvkm_uvmm);
        if (IS_ERR(object))
                return (void *)object;

        return nvkm_uvmm(object)->vmm;
}

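/* NVIF_VMM_V0_PFNCLR: tear down raw PFN mappings over [addr, addr + size).
 * The page-table work is done by nvkm_vmm_pfn_unmap() under the VMM mutex;
 * a zero-sized request simply returns the (successful) unpack result.
 */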
static int
nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        union {
                struct nvif_vmm_pfnclr_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        int ret = -ENOSYS;
        u64 addr, size;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                addr = args->v0.addr;
                size = args->v0.size;
        } else
                return ret;

        if (size) {
                mutex_lock(&vmm->mutex);
                ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
                mutex_unlock(&vmm->mutex);
        }

        return ret;
}

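/* NVIF_VMM_V0_PFNMAP: map an array of raw page-frame entries at 'addr'.
 * The variable-length tail of the request must hold exactly one phys[]
 * entry per page, i.e. argc == (size >> page) * sizeof(phys[0]), or the
 * call is rejected with -EINVAL before any locking is done.
 */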
static int
nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        union {
                struct nvif_vmm_pfnmap_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        int ret = -ENOSYS;
        u64 addr, size, *phys;
        u8 page;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
                page = args->v0.page;
                addr = args->v0.addr;
                size = args->v0.size;
                phys = args->v0.phys;
                if (argc != (size >> page) * sizeof(args->v0.phys[0]))
                        return -EINVAL;
        } else
                return ret;

        if (size) {
                mutex_lock(&vmm->mutex);
                ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
                mutex_unlock(&vmm->mutex);
        }

        return ret;
}

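/* NVIF_VMM_V0_UNMAP: unmap whatever memory is bound to the VMA starting
 * exactly at 'addr'.  Requests that hit the middle of a node, a busy VMA,
 * or a VMA with no backing memory are refused; the address range itself
 * stays allocated until it is released with NVIF_VMM_V0_PUT.
 */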
static int
nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        union {
                struct nvif_vmm_unmap_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        int ret = -ENOSYS;
        u64 addr;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                addr = args->v0.addr;
        } else
                return ret;

        mutex_lock(&vmm->mutex);
        vma = nvkm_vmm_node_search(vmm, addr);
        if (ret = -ENOENT, !vma || vma->addr != addr) {
                VMM_DEBUG(vmm, "lookup %016llx: %016llx",
                          addr, vma ? vma->addr : ~0ULL);
                goto done;
        }

        if (ret = -ENOENT, vma->busy) {
                VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                goto done;
        }

        if (ret = -EINVAL, !vma->memory) {
                VMM_DEBUG(vmm, "unmapped");
                goto done;
        }

        nvkm_vmm_unmap_locked(vmm, vma, false);
        ret = 0;
done:
        mutex_unlock(&vmm->mutex);
        return ret;
}

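/* NVIF_VMM_V0_MAP: bind an nvkm_memory object, looked up by handle, to an
 * existing VMA.  If the request covers only part of the node it is split
 * first.  The VMA is flagged busy and the mutex dropped around the
 * (potentially sleeping) nvkm_memory_map() call; on failure the flag is
 * cleared and the region unmapped again under the mutex.
 */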
static int
nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_map_v0 v0;
        } *args = argv;
        u64 addr, size, handle, offset;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        struct nvkm_memory *memory;
        int ret = -ENOSYS;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
                addr = args->v0.addr;
                size = args->v0.size;
                handle = args->v0.memory;
                offset = args->v0.offset;
        } else
                return ret;

        memory = nvkm_umem_search(client, handle);
        if (IS_ERR(memory)) {
                VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
                return PTR_ERR(memory);
        }

        mutex_lock(&vmm->mutex);
        if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
                VMM_DEBUG(vmm, "lookup %016llx", addr);
                goto fail;
        }

        if (ret = -ENOENT, vma->busy) {
                VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                goto fail;
        }

        if (ret = -EINVAL, vma->mapped && !vma->memory) {
                VMM_DEBUG(vmm, "pfnmap %016llx", addr);
                goto fail;
        }

        if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
                if (addr + size > vma->addr + vma->size || vma->memory ||
                    (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
                        VMM_DEBUG(vmm, "split %d %d %d "
                                       "%016llx %016llx %016llx %016llx",
                                  !!vma->memory, vma->refd, vma->mapref,
                                  addr, size, vma->addr, (u64)vma->size);
                        goto fail;
                }

                vma = nvkm_vmm_node_split(vmm, vma, addr, size);
                if (!vma) {
                        ret = -ENOMEM;
                        goto fail;
                }
        }
        vma->busy = true;
        mutex_unlock(&vmm->mutex);

        ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
        if (ret == 0) {
                /* Successful map will clear vma->busy. */
                nvkm_memory_unref(&memory);
                return 0;
        }

        mutex_lock(&vmm->mutex);
        vma->busy = false;
        nvkm_vmm_unmap_region(vmm, vma);
fail:
        mutex_unlock(&vmm->mutex);
        nvkm_memory_unref(&memory);
        return ret;
}

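/* NVIF_VMM_V0_PUT: return an allocated address range to the VMM.  Only the
 * head of an allocation may be freed (vma->part must be clear), and a VMA
 * that is busy with an in-flight map is refused.
 */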
static int
nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        union {
                struct nvif_vmm_put_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        int ret = -ENOSYS;
        u64 addr;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                addr = args->v0.addr;
        } else
                return ret;

        mutex_lock(&vmm->mutex);
        vma = nvkm_vmm_node_search(vmm, args->v0.addr);
        if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
                VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
                          vma ? vma->addr : ~0ULL, vma ? vma->part : 0);
                goto done;
        }

        if (ret = -ENOENT, vma->busy) {
                VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                goto done;
        }

        nvkm_vmm_put_locked(vmm, vma);
        ret = 0;
done:
        mutex_unlock(&vmm->mutex);
        return ret;
}

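/* NVIF_VMM_V0_GET: allocate a new range of address space.  The request
 * type selects the getref/mapref flags passed to nvkm_vmm_get_locked()
 * (PTES vs. ADDR), which control how page tables are referenced for the
 * range; the resulting base address is written back into the reply.
 */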
static int
nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        union {
                struct nvif_vmm_get_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        int ret = -ENOSYS;
        bool getref, mapref, sparse;
        u8 page, align;
        u64 size;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                getref = args->v0.type == NVIF_VMM_GET_V0_PTES;
                mapref = args->v0.type == NVIF_VMM_GET_V0_ADDR;
                sparse = args->v0.sparse;
                page = args->v0.page;
                align = args->v0.align;
                size = args->v0.size;
        } else
                return ret;

        mutex_lock(&vmm->mutex);
        ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
                                  page, align, size, &vma);
        mutex_unlock(&vmm->mutex);
        if (ret)
                return ret;

        args->v0.addr = vma->addr;
        return ret;
}

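/* NVIF_VMM_V0_PAGE: report the shift and capability bits (sparse, VRAM,
 * host, compression) of one entry in the backend's page-size table.  The
 * table length is found by walking to the zero-shift terminator, and the
 * requested index is validated against it.
 */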
static int
nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        union {
                struct nvif_vmm_page_v0 v0;
        } *args = argv;
        const struct nvkm_vmm_page *page;
        int ret = -ENOSYS;
        u8 type, index, nr;

        page = uvmm->vmm->func->page;
        for (nr = 0; page[nr].shift; nr++);

        if (!(nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                if ((index = args->v0.index) >= nr)
                        return -EINVAL;
                type = page[index].type;
                args->v0.shift = page[index].shift;
                args->v0.sparse = !!(type & NVKM_VMM_PAGE_SPARSE);
                args->v0.vram = !!(type & NVKM_VMM_PAGE_VRAM);
                args->v0.host = !!(type & NVKM_VMM_PAGE_HOST);
                args->v0.comp = !!(type & NVKM_VMM_PAGE_COMP);
        } else
                return -ENOSYS;

        return 0;
}

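/* Top-level method dispatch for the user VMM object.  Generic NVIF methods
 * are handled directly by the switch below; anything in the backend-specific
 * window NVIF_VMM_V0_MTHD(0x00..0x7f) is forwarded to vmm->func->mthd()
 * when the backend provides one, otherwise -EINVAL is returned.
 */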
static int
nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
        struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
        switch (mthd) {
        case NVIF_VMM_V0_PAGE  : return nvkm_uvmm_mthd_page  (uvmm, argv, argc);
        case NVIF_VMM_V0_GET   : return nvkm_uvmm_mthd_get   (uvmm, argv, argc);
        case NVIF_VMM_V0_PUT   : return nvkm_uvmm_mthd_put   (uvmm, argv, argc);
        case NVIF_VMM_V0_MAP   : return nvkm_uvmm_mthd_map   (uvmm, argv, argc);
        case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
        case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
        case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
        case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
                if (uvmm->vmm->func->mthd) {
                        return uvmm->vmm->func->mthd(uvmm->vmm,
                                                     uvmm->object.client,
                                                     mthd, argv, argc);
                }
                break;
        default:
                break;
        }
        return -EINVAL;
}

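/* Destructor: drop the reference taken on the wrapped VMM and return the
 * nvkm_uvmm wrapper so the object core can free it.
 */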
static void *
nvkm_uvmm_dtor(struct nvkm_object *object)
{
        struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
        nvkm_vmm_unref(&uvmm->vmm);
        return uvmm;
}

static const struct nvkm_object_func
nvkm_uvmm = {
        .dtor = nvkm_uvmm_dtor,
        .mthd = nvkm_uvmm_mthd,
};

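/* Constructor for the user-facing VMM object.  If the MMU has no global
 * VMM, a new (optionally "managed") address space is created from the
 * client's arguments; otherwise the request must not specify a size and
 * the existing mmu->vmm is referenced instead.  The reply reports the
 * page-size table length and the VMM's usable address range.
 */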
int
nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
              struct nvkm_object **pobject)
{
        struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
        const bool more = oclass->base.maxver >= 0;
        union {
                struct nvif_vmm_v0 v0;
        } *args = argv;
        const struct nvkm_vmm_page *page;
        struct nvkm_uvmm *uvmm;
        int ret = -ENOSYS;
        u64 addr, size;
        bool managed;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
                managed = args->v0.managed != 0;
                addr = args->v0.addr;
                size = args->v0.size;
        } else
                return ret;

        if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
        *pobject = &uvmm->object;

        if (!mmu->vmm) {
                ret = mmu->func->vmm.ctor(mmu, managed, addr, size, argv, argc,
                                          NULL, "user", &uvmm->vmm);
                if (ret)
                        return ret;

                uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
        } else {
                if (size)
                        return -EINVAL;

                uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
        }

        page = uvmm->vmm->func->page;
        args->v0.page_nr = 0;
        while (page && (page++)->shift)
                args->v0.page_nr++;
        args->v0.addr = uvmm->vmm->start;
        args->v0.size = uvmm->vmm->limit;
        return 0;
}