#define nv04_instmem(p) container_of((p), struct nv04_instmem, base)
#include "priv.h"

#include <core/ramht.h>

struct nv04_instmem {
        struct nvkm_instmem base;
        struct nvkm_mm heap;
};

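/******************************************************************************
 * instmem object implementation
 *****************************************************************************/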
#define nv04_instobj(p) container_of((p), struct nv04_instobj, base.memory)

struct nv04_instobj {
        struct nvkm_instobj base;
        struct nv04_instmem *imem;
        struct nvkm_mm_node *node;
};

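/* Each instance object is a suballocation (an nvkm_mm node) from the heap
 * set up at oneinit time.  CPU access goes through the fixed PRAMIN window
 * at 0x700000 in the device's register (PRI) space, offset by the node's
 * position within the heap.
 */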
static void
nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
        struct nv04_instobj *iobj = nv04_instobj(memory);
        struct nvkm_device *device = iobj->imem->base.subdev.device;
        nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
}

static u32
nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
        struct nv04_instobj *iobj = nv04_instobj(memory);
        struct nvkm_device *device = iobj->imem->base.subdev.device;
        return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
}

static const struct nvkm_memory_ptrs
nv04_instobj_ptrs = {
        .rd32 = nv04_instobj_rd32,
        .wr32 = nv04_instobj_wr32,
};

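/* acquire() hands back a pointer straight into the kernel's mapping of the
 * register BAR (device->pri), so there is nothing to tear down on release().
 */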
static void
nv04_instobj_release(struct nvkm_memory *memory)
{
}

static void __iomem *
nv04_instobj_acquire(struct nvkm_memory *memory)
{
        struct nv04_instobj *iobj = nv04_instobj(memory);
        struct nvkm_device *device = iobj->imem->base.subdev.device;
        return device->pri + 0x700000 + iobj->node->offset;
}

static u64
nv04_instobj_size(struct nvkm_memory *memory)
{
        return nv04_instobj(memory)->node->length;
}

static u64
nv04_instobj_addr(struct nvkm_memory *memory)
{
        return nv04_instobj(memory)->node->offset;
}

static enum nvkm_memory_target
nv04_instobj_target(struct nvkm_memory *memory)
{
        return NVKM_MEM_TARGET_INST;
}

static void *
nv04_instobj_dtor(struct nvkm_memory *memory)
{
        struct nv04_instobj *iobj = nv04_instobj(memory);
        /* return the suballocation to the heap under the instmem mutex */
        mutex_lock(&iobj->imem->base.mutex);
        nvkm_mm_free(&iobj->imem->heap, &iobj->node);
        mutex_unlock(&iobj->imem->base.mutex);
        nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
        return iobj;
}

static const struct nvkm_memory_func
nv04_instobj_func = {
        .dtor = nv04_instobj_dtor,
        .target = nv04_instobj_target,
        .size = nv04_instobj_size,
        .addr = nv04_instobj_addr,
        .acquire = nv04_instobj_acquire,
        .release = nv04_instobj_release,
};

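/* Allocation carves a block out of the shared heap under the instmem mutex;
 * a zero alignment from the caller falls back to byte alignment.
 */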
static int
nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
                 struct nvkm_memory **pmemory)
{
        struct nv04_instmem *imem = nv04_instmem(base);
        struct nv04_instobj *iobj;
        int ret;

        if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
                return -ENOMEM;
        *pmemory = &iobj->base.memory;

        nvkm_instobj_ctor(&nv04_instobj_func, &imem->base, &iobj->base);
        iobj->base.memory.ptrs = &nv04_instobj_ptrs;
        iobj->imem = imem;

        mutex_lock(&imem->base.mutex);
        ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
                           align ? align : 1, &iobj->node);
        mutex_unlock(&imem->base.mutex);
        return ret;
}

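/******************************************************************************
 * instmem subdev implementation
 *****************************************************************************/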
static u32
nv04_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
{
        return nvkm_rd32(imem->subdev.device, 0x700000 + addr);
}

static void
nv04_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
{
        nvkm_wr32(imem->subdev.device, 0x700000 + addr, data);
}

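/* One-time setup: create the suballocation heap covering the reserved area
 * of PRAMIN, then carve out the fixed structures the rest of the driver
 * expects (VBIOS image, RAMHT, RAMFC, RAMRO).
 */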
static int
nv04_instmem_oneinit(struct nvkm_instmem *base)
{
        struct nv04_instmem *imem = nv04_instmem(base);
        struct nvkm_device *device = imem->base.subdev.device;
        int ret;

        /* PRAMIN aperture maps over the end of VRAM, reserve it */
        imem->base.reserved = 512 * 1024;

        ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
        if (ret)
                return ret;

        /* 0x00000-0x10000: reserve for probable vbios image */
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
                              &imem->base.vbios);
        if (ret)
                return ret;

        /* 0x10000-0x18000: reserve for RAMHT */
        ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
        if (ret)
                return ret;

        /* 0x18000-0x18800: reserve for RAMFC (zeroed at creation) */
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00800, 0, true,
                              &imem->base.ramfc);
        if (ret)
                return ret;

        /* 0x18800-0x18a00: reserve for RAMRO */
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00200, 0, false,
                              &imem->base.ramro);
        if (ret)
                return ret;

        return 0;
}

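/* Teardown drops the fixed reservations made at oneinit time before the
 * heap itself is destroyed.
 */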
static void *
nv04_instmem_dtor(struct nvkm_instmem *base)
{
        struct nv04_instmem *imem = nv04_instmem(base);
        nvkm_memory_unref(&imem->base.ramfc);
        nvkm_memory_unref(&imem->base.ramro);
        nvkm_ramht_del(&imem->base.ramht);
        nvkm_memory_unref(&imem->base.vbios);
        nvkm_mm_fini(&imem->heap);
        return imem;
}

static const struct nvkm_instmem_func
nv04_instmem = {
        .dtor = nv04_instmem_dtor,
        .oneinit = nv04_instmem_oneinit,
        .rd32 = nv04_instmem_rd32,
        .wr32 = nv04_instmem_wr32,
        .memory_new = nv04_instobj_new,
        .zero = false,
};

int
nv04_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                 struct nvkm_instmem **pimem)
{
        struct nv04_instmem *imem;

        if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_instmem_ctor(&nv04_instmem, device, type, inst, &imem->base);
        *pimem = &imem->base;
        return 0;
}