/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/gpuobj.h>
#include <core/engine.h>

#include <subdev/instmem.h>
#include <subdev/bar.h>
#include <subdev/mmu.h>

/* fast-path, where backend is able to provide direct pointer to memory */
static u32
nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj *gpuobj, u32 offset)
{
    return ioread32_native(gpuobj->map + offset);
}

static void
nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
    iowrite32_native(data, gpuobj->map + offset);
}

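/*
 * Note: gpuobj->map is only valid between an acquire and the matching
 * release; the acquire hooks below install these fast accessors when
 * nvkm_kmap() yields a direct CPU pointer, and fall back to the slower
 * paths that go through the backing memory otherwise.
 */
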
/* accessor functions for gpuobjs allocated directly from instmem */
static int
nvkm_gpuobj_heap_map(struct nvkm_gpuobj *gpuobj, u64 offset,
                     struct nvkm_vmm *vmm, struct nvkm_vma *vma,
                     void *argv, u32 argc)
{
    return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc);
}

static u32
nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
    return nvkm_ro32(gpuobj->memory, offset);
}

static void
nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
    nvkm_wo32(gpuobj->memory, offset, data);
}

static const struct nvkm_gpuobj_func nvkm_gpuobj_heap;
static void
nvkm_gpuobj_heap_release(struct nvkm_gpuobj *gpuobj)
{
    gpuobj->func = &nvkm_gpuobj_heap;
    nvkm_done(gpuobj->memory);
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap_fast = {
    .release = nvkm_gpuobj_heap_release,
    .rd32 = nvkm_gpuobj_rd32_fast,
    .wr32 = nvkm_gpuobj_wr32_fast,
    .map = nvkm_gpuobj_heap_map,
};

static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap_slow = {
    .release = nvkm_gpuobj_heap_release,
    .rd32 = nvkm_gpuobj_heap_rd32,
    .wr32 = nvkm_gpuobj_heap_wr32,
    .map = nvkm_gpuobj_heap_map,
};

static void *
nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
{
    gpuobj->map = nvkm_kmap(gpuobj->memory);
    if (likely(gpuobj->map))
        gpuobj->func = &nvkm_gpuobj_heap_fast;
    else
        gpuobj->func = &nvkm_gpuobj_heap_slow;
    return gpuobj->map;
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap = {
    .acquire = nvkm_gpuobj_heap_acquire,
    .map = nvkm_gpuobj_heap_map,
};

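/*
 * Callers bracket accesses with nvkm_kmap() and nvkm_done() because of the
 * func-pointer swap above; a minimal sketch, mirroring the zeroing loop in
 * nvkm_gpuobj_ctor() below:
 *
 *     nvkm_kmap(gpuobj);
 *     nvkm_wo32(gpuobj, 0x00, 0x00000000);
 *     nvkm_done(gpuobj);
 *
 * Between those two calls the object uses either the _fast table (direct
 * CPU mapping) or the _slow table (indirect access via the backing memory).
 */
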
/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
static int
nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u64 offset,
                struct nvkm_vmm *vmm, struct nvkm_vma *vma,
                void *argv, u32 argc)
{
    return nvkm_memory_map(gpuobj->parent, gpuobj->node->offset + offset,
                           vmm, vma, argv, argc);
}

static u32
nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
    return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
}

static void
nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
    nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
}

static const struct nvkm_gpuobj_func nvkm_gpuobj_func;
static void
nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj)
{
    gpuobj->func = &nvkm_gpuobj_func;
    nvkm_done(gpuobj->parent);
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_fast = {
    .release = nvkm_gpuobj_release,
    .rd32 = nvkm_gpuobj_rd32_fast,
    .wr32 = nvkm_gpuobj_wr32_fast,
    .map = nvkm_gpuobj_map,
};

static const struct nvkm_gpuobj_func
nvkm_gpuobj_slow = {
    .release = nvkm_gpuobj_release,
    .rd32 = nvkm_gpuobj_rd32,
    .wr32 = nvkm_gpuobj_wr32,
    .map = nvkm_gpuobj_map,
};

static void *
nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
{
    gpuobj->map = nvkm_kmap(gpuobj->parent);
    if (likely(gpuobj->map)) {
        gpuobj->map  = (u8 *)gpuobj->map + gpuobj->node->offset;
        gpuobj->func = &nvkm_gpuobj_fast;
    } else {
        gpuobj->func = &nvkm_gpuobj_slow;
    }
    return gpuobj->map;
}

static const struct nvkm_gpuobj_func
nvkm_gpuobj_func = {
    .acquire = nvkm_gpuobj_acquire,
    .map = nvkm_gpuobj_map,
};

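/*
 * Sub-allocated objects follow the same acquire/release pattern as the
 * heap variants above; the only difference is that the CPU pointer and
 * every rd32/wr32/map offset are biased by gpuobj->node->offset within
 * the parent, so a child needs no backing nvkm_memory of its own.
 */
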
static int
nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
                 struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj)
{
    u32 offset;
    int ret;

    if (parent) {
        if (align >= 0) {
            ret = nvkm_mm_head(&parent->heap, 0, 1, size, size,
                               max(align, 1), &gpuobj->node);
        } else {
            ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size,
                               -align, &gpuobj->node);
        }
        if (ret)
            return ret;

        gpuobj->parent = parent;
        gpuobj->func = &nvkm_gpuobj_func;
        gpuobj->addr = parent->addr + gpuobj->node->offset;
        gpuobj->size = gpuobj->node->length;

        if (zero) {
            nvkm_kmap(gpuobj);
            for (offset = 0; offset < gpuobj->size; offset += 4)
                nvkm_wo32(gpuobj, offset, 0x00000000);
            nvkm_done(gpuobj);
        }
    } else {
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size,
                              abs(align), zero, &gpuobj->memory);
        if (ret)
            return ret;

        gpuobj->func = &nvkm_gpuobj_heap;
        gpuobj->addr = nvkm_memory_addr(gpuobj->memory);
        gpuobj->size = nvkm_memory_size(gpuobj->memory);
    }

    return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1);
}

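/*
 * The sign of 'align' selects the placement policy above: a non-negative
 * value sub-allocates from the head of the parent's heap with an alignment
 * of max(align, 1), a negative value sub-allocates from the tail with an
 * alignment of -align, and without a parent abs(align) is forwarded to
 * nvkm_memory_new() for a fresh instmem allocation. A minimal sketch of a
 * tail allocation (hypothetical 'device', 'parent' and 'child' variables):
 *
 *     ret = nvkm_gpuobj_new(device, 0x1000, -0x100, true, parent, &child);
 */
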
void
nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
{
    struct nvkm_gpuobj *gpuobj = *pgpuobj;
    if (gpuobj) {
        if (gpuobj->parent)
            nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
        nvkm_mm_fini(&gpuobj->heap);
        nvkm_memory_unref(&gpuobj->memory);
        kfree(*pgpuobj);
        *pgpuobj = NULL;
    }
}

int
nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
                struct nvkm_gpuobj *parent, struct nvkm_gpuobj **pgpuobj)
{
    struct nvkm_gpuobj *gpuobj;
    int ret;

    if (!(gpuobj = *pgpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL)))
        return -ENOMEM;

    ret = nvkm_gpuobj_ctor(device, size, align, zero, parent, gpuobj);
    if (ret)
        nvkm_gpuobj_del(pgpuobj);
    return ret;
}

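/*
 * Minimal usage sketch (hypothetical caller, assuming a valid 'device'):
 * allocate a zeroed object straight from instmem, write one word, then
 * free it again.
 *
 *     struct nvkm_gpuobj *obj = NULL;
 *     int ret = nvkm_gpuobj_new(device, 0x1000, 0x100, true, NULL, &obj);
 *     if (ret)
 *         return ret;
 *     nvkm_kmap(obj);
 *     nvkm_wo32(obj, 0x00, 0x00000001);
 *     nvkm_done(obj);
 *     nvkm_gpuobj_del(&obj);
 *
 * On failure nvkm_gpuobj_new() has already cleaned up and left *pgpuobj
 * set to NULL.
 */
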
/* the below is basically only here to support sharing the paged dma object
 * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
 * anywhere else.
 */

int
nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj)
{
    if (!(*pgpuobj = kzalloc(sizeof(**pgpuobj), GFP_KERNEL)))
        return -ENOMEM;

    (*pgpuobj)->addr = nvkm_memory_addr(memory);
    (*pgpuobj)->size = nvkm_memory_size(memory);
    return 0;
}

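/*
 * A wrapped object carries only the address and size of the existing
 * memory: it has no func table, heap or parent, so it cannot be kmapped,
 * accessed through nvkm_ro32()/nvkm_wo32(), or sub-allocated from, which
 * is consistent with the restriction stated in the comment above.
 */
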
void
nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
                      u32 length)
{
    int i;

    for (i = 0; i < length; i += 4)
        nvkm_wo32(dst, dstoffset + i, *(u32 *)(src + i));
}

void
nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,
                        u32 length)
{
    int i;

    for (i = 0; i < length; i += 4)
        ((u32 *)dst)[i / 4] = nvkm_ro32(src, srcoffset + i);
}
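
/*
 * Both helpers go through nvkm_wo32()/nvkm_ro32(), so the gpuobj has to be
 * acquired around them, and 'length' is expected to be a multiple of four
 * bytes since the loops copy whole 32-bit words. A minimal sketch
 * (hypothetical 'obj'):
 *
 *     u32 buf[4] = { 1, 2, 3, 4 };
 *
 *     nvkm_kmap(obj);
 *     nvkm_gpuobj_memcpy_to(obj, 0x00, buf, sizeof(buf));
 *     nvkm_gpuobj_memcpy_from(buf, obj, 0x00, sizeof(buf));
 *     nvkm_done(obj);
 */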