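/*
 * Host (system) memory objects for NVKM: nvkm_memory implementations backed
 * by a caller-provided DMA address array or scatterlist, plus allocation and
 * DMA-mapping of host pages when the caller supplies neither.
 */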
#define nvkm_mem(p) container_of((p), struct nvkm_mem, memory)
#include "mem.h"

#include <core/memory.h>

#include <nvif/if000a.h>
#include <nvif/unpack.h>
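
/*
 * Host memory object.  The backing store is either a caller-provided DMA
 * address array or scatterlist, or pages allocated (and DMA-mapped) by
 * nvkm_mem_new_host(), in which case mem/dma are owned by this object.
 */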
struct nvkm_mem {
	struct nvkm_memory memory;
	enum nvkm_memory_target target;
	struct nvkm_mmu *mmu;
	u64 pages;
	struct page **mem;
	union {
		struct scatterlist *sgl;
		dma_addr_t *dma;
	};
};

static enum nvkm_memory_target
nvkm_mem_target(struct nvkm_memory *memory)
{
	return nvkm_mem(memory)->target;
}

static u8
nvkm_mem_page(struct nvkm_memory *memory)
{
	return PAGE_SHIFT;
}
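
/* A single linear bus address only exists for a one-page allocation. */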
static u64
nvkm_mem_addr(struct nvkm_memory *memory)
{
	struct nvkm_mem *mem = nvkm_mem(memory);
	if (mem->pages == 1 && mem->mem)
		return mem->dma[0];
	return ~0ULL;
}

static u64
nvkm_mem_size(struct nvkm_memory *memory)
{
	return nvkm_mem(memory)->pages << PAGE_SHIFT;
}
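
/* Describe the memory to the VMM as an array of per-page DMA addresses. */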
static int
nvkm_mem_map_dma(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
		 struct nvkm_vma *vma, void *argv, u32 argc)
{
	struct nvkm_mem *mem = nvkm_mem(memory);
	struct nvkm_vmm_map map = {
		.memory = &mem->memory,
		.offset = offset,
		.dma = mem->dma,
	};
	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}
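
/*
 * If the pages were allocated by nvkm_mem_new_host(), unmap and free them
 * along with the bookkeeping arrays.  The object itself is returned for the
 * core to free.
 */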
static void *
nvkm_mem_dtor(struct nvkm_memory *memory)
{
	struct nvkm_mem *mem = nvkm_mem(memory);
	if (mem->mem) {
		while (mem->pages--) {
			dma_unmap_page(mem->mmu->subdev.device->dev,
				       mem->dma[mem->pages], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
			__free_page(mem->mem[mem->pages]);
		}
		kvfree(mem->dma);
		kvfree(mem->mem);
	}
	return mem;
}
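
/* Host memory described by an array of per-page DMA addresses. */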
static const struct nvkm_memory_func
nvkm_mem_dma = {
	.dtor = nvkm_mem_dtor,
	.target = nvkm_mem_target,
	.page = nvkm_mem_page,
	.addr = nvkm_mem_addr,
	.size = nvkm_mem_size,
	.map = nvkm_mem_map_dma,
};
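
/* Describe the memory to the VMM as a scatterlist. */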
static int
nvkm_mem_map_sgl(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
		 struct nvkm_vma *vma, void *argv, u32 argc)
{
	struct nvkm_mem *mem = nvkm_mem(memory);
	struct nvkm_vmm_map map = {
		.memory = &mem->memory,
		.offset = offset,
		.sgl = mem->sgl,
	};
	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}
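
/* Host memory described by a scatterlist. */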
static const struct nvkm_memory_func
nvkm_mem_sgl = {
	.dtor = nvkm_mem_dtor,
	.target = nvkm_mem_target,
	.page = nvkm_mem_page,
	.addr = nvkm_mem_addr,
	.size = nvkm_mem_size,
	.map = nvkm_mem_map_sgl,
};
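
/*
 * CPU-map the backing pages with vmap().  Only possible when the pages were
 * allocated by nvkm_mem_new_host() rather than supplied by the caller.
 */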
int
nvkm_mem_map_host(struct nvkm_memory *memory, void **pmap)
{
	struct nvkm_mem *mem = nvkm_mem(memory);
	if (mem->mem) {
		*pmap = vmap(mem->mem, mem->pages, VM_MAP, PAGE_KERNEL);
		return *pmap ? 0 : -EFAULT;
	}
	return -EINVAL;
}
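
/*
 * Create a host memory object.  If the caller passes a v0 argument structure,
 * the backing store (DMA address array or scatterlist) is supplied by the
 * caller; otherwise pages are allocated and DMA-mapped here.
 */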
static int
nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
		  void *argv, u32 argc, struct nvkm_memory **pmemory)
{
	struct device *dev = mmu->subdev.device->dev;
	union {
		struct nvif_mem_ram_vn vn;
		struct nvif_mem_ram_v0 v0;
	} *args = argv;
	int ret = -ENOSYS;
	enum nvkm_memory_target target;
	struct nvkm_mem *mem;
	gfp_t gfp = GFP_USER | __GFP_ZERO;

	if ( (mmu->type[type].type & NVKM_MEM_COHERENT) &&
	    !(mmu->type[type].type & NVKM_MEM_UNCACHED))
		target = NVKM_MEM_TARGET_HOST;
	else
		target = NVKM_MEM_TARGET_NCOH;

	if (page != PAGE_SHIFT)
		return -EINVAL;

	if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
		return -ENOMEM;
	mem->target = target;
	mem->mmu = mmu;
	*pmemory = &mem->memory;
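
	/*
	 * Caller-provided backing store: adopt its DMA address array or
	 * scatterlist and just record the page count.
	 */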
	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		if (args->v0.dma) {
			nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
			mem->dma = args->v0.dma;
		} else {
			nvkm_memory_ctor(&nvkm_mem_sgl, &mem->memory);
			mem->sgl = args->v0.sgl;
		}

		if (!IS_ALIGNED(size, PAGE_SIZE))
			return -EINVAL;
		mem->pages = size >> PAGE_SHIFT;
		return 0;
	} else
	if ( (ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		kfree(mem);
		return ret;
	}
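
	/* No backing store was provided, so allocate and map it here. */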
	nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
	size = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	if (!(mem->mem = kvmalloc_array(size, sizeof(*mem->mem), GFP_KERNEL)))
		return -ENOMEM;
	if (!(mem->dma = kvmalloc_array(size, sizeof(*mem->dma), GFP_KERNEL)))
		return -ENOMEM;
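
	/*
	 * Restrict allocations to 32-bit addressable pages if the device
	 * cannot DMA above 4GiB.
	 */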
	if (mmu->dma_bits > 32)
		gfp |= GFP_HIGHUSER;
	else
		gfp |= GFP_DMA32;
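
	/* Allocate the pages one at a time, DMA-mapping each as we go. */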
	for (mem->pages = 0; size; size--, mem->pages++) {
		struct page *p = alloc_page(gfp);
		if (!p)
			return -ENOMEM;

		mem->dma[mem->pages] = dma_map_page(dev, p, 0, PAGE_SIZE,
						    DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, mem->dma[mem->pages])) {
			__free_page(p);
			return -ENOMEM;
		}

		mem->mem[mem->pages] = p;
	}

	return 0;
}
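
/*
 * Allocate memory of the given MMU type, dispatching to the backend's VRAM
 * allocator or to nvkm_mem_new_host() for host memory.  On failure, any
 * partially-constructed object is released via nvkm_memory_unref().
 */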
int
nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
		  void *argv, u32 argc, struct nvkm_memory **pmemory)
{
	struct nvkm_memory *memory = NULL;
	int ret;

	if (mmu->type[type].type & NVKM_MEM_VRAM) {
		ret = mmu->func->mem.vram(mmu, type, page, size,
					  argv, argc, &memory);
	} else {
		ret = nvkm_mem_new_host(mmu, type, page, size,
					argv, argc, &memory);
	}

	if (ret)
		nvkm_memory_unref(&memory);
	*pmemory = memory;
	return ret;
}