/*
 * Copyright 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#define nvkm_vram(p) container_of((p), struct nvkm_vram, memory)
#include "ram.h"

#include <core/memory.h>
#include <subdev/mmu.h>

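/*
 * A VRAM-backed nvkm_memory object: "memory" is the base object, "ram"
 * points back at the owning nvkm_ram, "page" is the page shift the
 * allocation was made for, and "mn" heads the chain of nvkm_mm_node
 * allocations taken from the VRAM memory manager.
 */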
struct nvkm_vram {
    struct nvkm_memory memory;
    struct nvkm_ram *ram;
    u8 page;
    struct nvkm_mm_node *mn;
};

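/*
 * Map a VRAM allocation into a virtual address space by handing the
 * backing mm nodes to the MMU code.
 */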
static int
nvkm_vram_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
          struct nvkm_vma *vma, void *argv, u32 argc)
{
    struct nvkm_vram *vram = nvkm_vram(memory);
    struct nvkm_vmm_map map = {
        .memory = &vram->memory,
        .offset = offset,
        .mem = vram->mn,
    };

    return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}

static u64
nvkm_vram_size(struct nvkm_memory *memory)
{
    return (u64)nvkm_mm_size(nvkm_vram(memory)->mn) << NVKM_RAM_MM_SHIFT;
}

static u64
nvkm_vram_addr(struct nvkm_memory *memory)
{
    struct nvkm_vram *vram = nvkm_vram(memory);
    if (!nvkm_mm_contiguous(vram->mn))
        return ~0ULL;
    return (u64)nvkm_mm_addr(vram->mn) << NVKM_RAM_MM_SHIFT;
}

static u8
nvkm_vram_page(struct nvkm_memory *memory)
{
    return nvkm_vram(memory)->page;
}

static enum nvkm_memory_target
nvkm_vram_target(struct nvkm_memory *memory)
{
    return NVKM_MEM_TARGET_VRAM;
}

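/*
 * Destructor: return every mm node in the chain to the VRAM memory
 * manager under the ram mutex, then hand the object back for freeing.
 */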
static void *
nvkm_vram_dtor(struct nvkm_memory *memory)
{
    struct nvkm_vram *vram = nvkm_vram(memory);
    struct nvkm_mm_node *next = vram->mn;
    struct nvkm_mm_node *node;
    mutex_lock(&vram->ram->mutex);
    while ((node = next)) {
        next = node->next;
        nvkm_mm_free(&vram->ram->vram, &node);
    }
    mutex_unlock(&vram->ram->mutex);
    return vram;
}

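/* nvkm_memory backend implementation for VRAM allocations. */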
static const struct nvkm_memory_func
nvkm_vram = {
    .dtor = nvkm_vram_dtor,
    .target = nvkm_vram_target,
    .page = nvkm_vram_page,
    .addr = nvkm_vram_addr,
    .size = nvkm_vram_size,
    .map = nvkm_vram_map,
};

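/*
 * Allocate "size" bytes of VRAM from the device's memory manager as a new
 * nvkm_memory object.  "rpage" is the requested page shift (clamped to at
 * least NVKM_RAM_MM_SHIFT), "contig" forces a single physically contiguous
 * node, and "back" allocates from the top of the heap instead of the
 * bottom.  The allocation may span several mm nodes unless "contig" is set.
 *
 * Illustrative call (parameter values are examples only):
 *   nvkm_ram_get(device, NVKM_RAM_MM_NORMAL, 0x01, 12, 0x100000,
 *                true, false, &memory);
 * would request 1 MiB of contiguous VRAM at a shift-12 (4 KiB) page size.
 */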
int
nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
         bool contig, bool back, struct nvkm_memory **pmemory)
{
    struct nvkm_ram *ram;
    struct nvkm_mm *mm;
    struct nvkm_mm_node **node, *r;
    struct nvkm_vram *vram;
    u8   page = max(rpage, (u8)NVKM_RAM_MM_SHIFT);
    u32 align = (1 << page) >> NVKM_RAM_MM_SHIFT;
    u32   max = ALIGN(size, 1 << page) >> NVKM_RAM_MM_SHIFT;
    u32   min = contig ? max : align;
    int ret;

    if (!device->fb || !(ram = device->fb->ram))
        return -ENODEV;
    mm = &ram->vram;

    if (!(vram = kzalloc(sizeof(*vram), GFP_KERNEL)))
        return -ENOMEM;
    nvkm_memory_ctor(&nvkm_vram, &vram->memory);
    vram->ram = ram;
    vram->page = page;
    *pmemory = &vram->memory;

    mutex_lock(&ram->mutex);
    node = &vram->mn;
    do {
        if (back)
            ret = nvkm_mm_tail(mm, heap, type, max, min, align, &r);
        else
            ret = nvkm_mm_head(mm, heap, type, max, min, align, &r);
        if (ret) {
            mutex_unlock(&ram->mutex);
            nvkm_memory_unref(pmemory);
            return ret;
        }

        *node = r;
        node = &r->next;
        max -= r->length;
    } while (max);
    mutex_unlock(&ram->mutex);
    return 0;
}

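/* Call the chipset-specific init hook, if the backend provides one. */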
int
nvkm_ram_init(struct nvkm_ram *ram)
{
    if (ram->func->init)
        return ram->func->init(ram);
    return 0;
}

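/*
 * Tear down a RAM object: run the backend destructor (which may return a
 * different pointer to free), release the VRAM mm and the mutex, then free
 * the object and clear the caller's pointer.
 */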
void
nvkm_ram_del(struct nvkm_ram **pram)
{
    struct nvkm_ram *ram = *pram;
    if (ram && !WARN_ON(!ram->func)) {
        if (ram->func->dtor)
            *pram = ram->func->dtor(ram);
        nvkm_mm_fini(&ram->vram);
        mutex_destroy(&ram->mutex);
        kfree(*pram);
        *pram = NULL;
    }
}

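/*
 * One-time constructor for a RAM object: record the backend, type and size,
 * log the detected memory, and initialise the VRAM memory manager (in
 * NVKM_RAM_MM_SHIFT-sized units) if it has not been set up already.
 */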
int
nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
          enum nvkm_ram_type type, u64 size, struct nvkm_ram *ram)
{
    static const char *name[] = {
        [NVKM_RAM_TYPE_UNKNOWN] = "of unknown memory type",
        [NVKM_RAM_TYPE_STOLEN ] = "stolen system memory",
        [NVKM_RAM_TYPE_SGRAM  ] = "SGRAM",
        [NVKM_RAM_TYPE_SDRAM  ] = "SDRAM",
        [NVKM_RAM_TYPE_DDR1   ] = "DDR1",
        [NVKM_RAM_TYPE_DDR2   ] = "DDR2",
        [NVKM_RAM_TYPE_DDR3   ] = "DDR3",
        [NVKM_RAM_TYPE_GDDR2  ] = "GDDR2",
        [NVKM_RAM_TYPE_GDDR3  ] = "GDDR3",
        [NVKM_RAM_TYPE_GDDR4  ] = "GDDR4",
        [NVKM_RAM_TYPE_GDDR5  ] = "GDDR5",
        [NVKM_RAM_TYPE_GDDR5X ] = "GDDR5X",
        [NVKM_RAM_TYPE_GDDR6  ] = "GDDR6",
        [NVKM_RAM_TYPE_HBM2   ] = "HBM2",
    };
    struct nvkm_subdev *subdev = &fb->subdev;
    int ret;

    nvkm_info(subdev, "%d MiB %s\n", (int)(size >> 20), name[type]);
    ram->func = func;
    ram->fb = fb;
    ram->type = type;
    ram->size = size;
    mutex_init(&ram->mutex);

    if (!nvkm_mm_initialised(&ram->vram)) {
        ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, 0,
                   size >> NVKM_RAM_MM_SHIFT, 1);
        if (ret)
            return ret;
    }

    return 0;
}

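/* Allocate and construct a RAM object in one step. */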
int
nvkm_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
          enum nvkm_ram_type type, u64 size, struct nvkm_ram **pram)
{
    if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
        return -ENOMEM;
    return nvkm_ram_ctor(func, fb, type, size, *pram);
}