#include "umem.h"
#include "ummu.h"

#include <core/client.h>
#include <core/memory.h>
#include <subdev/bar.h>

#include <nvif/class.h>
#include <nvif/if000a.h>
#include <nvif/unpack.h>

static const struct nvkm_object_func nvkm_umem;
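
/*
 * Resolve a client-supplied memory handle to the nvkm_memory it wraps.
 * The handle is first looked up in the given client's object tree; if
 * that fails and the client is a sub-client, the master client's umem
 * list is scanned under its lock instead, so memory created on the
 * master client can still be found from its sub-clients.
 */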
struct nvkm_memory *
nvkm_umem_search(struct nvkm_client *client, u64 handle)
{
	struct nvkm_client *master = client->object.client;
	struct nvkm_memory *memory = NULL;
	struct nvkm_object *object;
	struct nvkm_umem *umem;

	object = nvkm_object_search(client, handle, &nvkm_umem);
	if (IS_ERR(object)) {
		if (client != master) {
			spin_lock(&master->lock);
			list_for_each_entry(umem, &master->umem, head) {
				if (umem->object.object == handle) {
					memory = nvkm_memory_ref(umem->memory);
					break;
				}
			}
			spin_unlock(&master->lock);
		}
	} else {
		umem = nvkm_umem(object);
		memory = nvkm_memory_ref(umem->memory);
	}

	return memory ? memory : ERR_PTR(-ENOENT);
}

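/*
 * Undo a userspace mapping set up by nvkm_umem_map(): for an IO mapping,
 * release the BAR1 VMA (if one was allocated); for a host mapping,
 * vunmap() the kernel virtual address.  Returns -EEXIST if the object
 * is not currently mapped.
 */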
static int
nvkm_umem_unmap(struct nvkm_object *object)
{
	struct nvkm_umem *umem = nvkm_umem(object);

	if (!umem->map)
		return -EEXIST;

	if (umem->io) {
		if (!IS_ERR(umem->bar)) {
			struct nvkm_device *device = umem->mmu->subdev.device;
			nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &umem->bar);
		} else {
			umem->bar = NULL;
		}
	} else {
		vunmap(umem->map);
		umem->map = NULL;
	}

	return 0;
}

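/*
 * Set up a userspace mapping of the memory object.  Host memory, when no
 * extra map arguments are supplied, is exposed as a kernel virtual
 * address (NVKM_OBJECT_MAP_VA); VRAM and kind-tagged memory are mapped
 * through the MMU backend into BAR1 and exposed as an IO region
 * (NVKM_OBJECT_MAP_IO).
 */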
static int
nvkm_umem_map(struct nvkm_object *object, void *argv, u32 argc,
	      enum nvkm_object_map *type, u64 *handle, u64 *length)
{
	struct nvkm_umem *umem = nvkm_umem(object);
	struct nvkm_mmu *mmu = umem->mmu;

	if (!umem->mappable)
		return -EINVAL;
	if (umem->map)
		return -EEXIST;

	if ((umem->type & NVKM_MEM_HOST) && !argc) {
		int ret = nvkm_mem_map_host(umem->memory, &umem->map);
		if (ret)
			return ret;

		*handle = (unsigned long)(void *)umem->map;
		*length = nvkm_memory_size(umem->memory);
		*type = NVKM_OBJECT_MAP_VA;
		return 0;
	} else
	if ((umem->type & NVKM_MEM_VRAM) ||
	    (umem->type & NVKM_MEM_KIND)) {
		int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc,
					      handle, length, &umem->bar);
		if (ret)
			return ret;

		*type = NVKM_OBJECT_MAP_IO;
	} else {
		return -EINVAL;
	}

	umem->io = (*type == NVKM_OBJECT_MAP_IO);
	return 0;
}

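/*
 * Destructor: unlink the object from its client's umem list and drop the
 * reference on the backing memory.
 */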
static void *
nvkm_umem_dtor(struct nvkm_object *object)
{
	struct nvkm_umem *umem = nvkm_umem(object);
	spin_lock(&umem->object.client->lock);
	list_del_init(&umem->head);
	spin_unlock(&umem->object.client->lock);
	nvkm_memory_unref(&umem->memory);
	return umem;
}

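/* Userspace-facing memory object: destructor plus map/unmap hooks. */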
static const struct nvkm_object_func
nvkm_umem = {
	.dtor = nvkm_umem_dtor,
	.map = nvkm_umem_map,
	.unmap = nvkm_umem_unmap,
};

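/*
 * Constructor: unpack the nvif_mem_v0 arguments, allocate memory of the
 * requested type via nvkm_mem_new_type(), add the object to the client's
 * umem list, and report the resulting page size, address and size back
 * to userspace.
 */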
int
nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
	      struct nvkm_object **pobject)
{
	struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
	union {
		struct nvif_mem_v0 v0;
	} *args = argv;
	struct nvkm_umem *umem;
	int type, ret = -ENOSYS;
	u8  page;
	u64 size;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
		type = args->v0.type;
		page = args->v0.page;
		size = args->v0.size;
	} else
		return ret;

	if (type >= mmu->type_nr)
		return -EINVAL;

	if (!(umem = kzalloc(sizeof(*umem), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_umem, oclass, &umem->object);
	umem->mmu = mmu;
	umem->type = mmu->type[type].type;
	INIT_LIST_HEAD(&umem->head);
	*pobject = &umem->object;

	if (mmu->type[type].type & NVKM_MEM_MAPPABLE) {
		page = max_t(u8, page, PAGE_SHIFT);
		umem->mappable = true;
	}

	ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc,
				&umem->memory);
	if (ret)
		return ret;

	spin_lock(&umem->object.client->lock);
	list_add(&umem->head, &umem->object.client->umem);
	spin_unlock(&umem->object.client->lock);

	args->v0.page = nvkm_memory_page(umem->memory);
	args->v0.addr = nvkm_memory_addr(umem->memory);
	args->v0.size = nvkm_memory_size(umem->memory);
	return 0;
}