0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
/* Downcast from the generic instmem base to the NV40 implementation. */
#define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
#include "priv.h"

#include <core/ramht.h>
#include <engine/gr/nv40.h>

/* NV40-family instance memory: a suballocated window of VRAM exposed
 * to the CPU through the PRAMIN BAR.
 */
struct nv40_instmem {
	struct nvkm_instmem base;	/* common instmem state (mutex, reserved, ...) */
	struct nvkm_mm heap;		/* allocator over the reserved PRAMIN range */
	void __iomem *iomem;		/* CPU mapping of the PRAMIN BAR */
};
0035
0036
0037
0038
/* Downcast from the embedded nvkm_memory to the NV40 instance object. */
#define nv40_instobj(p) container_of((p), struct nv40_instobj, base.memory)

/* A single allocation carved out of the NV40 instmem heap. */
struct nv40_instobj {
	struct nvkm_instobj base;	/* generic instobj (embeds nvkm_memory) */
	struct nv40_instmem *imem;	/* owning instmem, used for iomem/heap access */
	struct nvkm_mm_node *node;	/* heap node: offset/length within PRAMIN */
};
0046
0047 static void
0048 nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
0049 {
0050 struct nv40_instobj *iobj = nv40_instobj(memory);
0051 iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
0052 }
0053
0054 static u32
0055 nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
0056 {
0057 struct nv40_instobj *iobj = nv40_instobj(memory);
0058 return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
0059 }
0060
/* Direct rd32/wr32 accessors installed on every nv40 instobj. */
static const struct nvkm_memory_ptrs
nv40_instobj_ptrs = {
	.rd32 = nv40_instobj_rd32,
	.wr32 = nv40_instobj_wr32,
};
0066
static void
nv40_instobj_release(struct nvkm_memory *memory)
{
	/* Ensure all CPU writes done through the acquired mapping are
	 * posted before anything else (e.g. the GPU) looks at them.
	 */
	wmb();
}
0072
0073 static void __iomem *
0074 nv40_instobj_acquire(struct nvkm_memory *memory)
0075 {
0076 struct nv40_instobj *iobj = nv40_instobj(memory);
0077 return iobj->imem->iomem + iobj->node->offset;
0078 }
0079
0080 static u64
0081 nv40_instobj_size(struct nvkm_memory *memory)
0082 {
0083 return nv40_instobj(memory)->node->length;
0084 }
0085
0086 static u64
0087 nv40_instobj_addr(struct nvkm_memory *memory)
0088 {
0089 return nv40_instobj(memory)->node->offset;
0090 }
0091
static enum nvkm_memory_target
nv40_instobj_target(struct nvkm_memory *memory)
{
	/* All nv40 instance objects live in the instance-memory aperture. */
	return NVKM_MEM_TARGET_INST;
}
0097
/* Tear down an instobj: return its range to the heap (under the instmem
 * mutex, matching the locking in nv40_instobj_new()), then detach it
 * from the base instobj tracking.  Returns the container so the core
 * can kfree() it.
 */
static void *
nv40_instobj_dtor(struct nvkm_memory *memory)
{
	struct nv40_instobj *iobj = nv40_instobj(memory);
	mutex_lock(&iobj->imem->base.mutex);
	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
	mutex_unlock(&iobj->imem->base.mutex);
	nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
	return iobj;
}
0108
/* nvkm_memory vtable for nv40 instance objects.  No .map/.kmap: CPU
 * access goes through acquire/release and the ptrs accessors above.
 */
static const struct nvkm_memory_func
nv40_instobj_func = {
	.dtor = nv40_instobj_dtor,
	.target = nv40_instobj_target,
	.size = nv40_instobj_size,
	.addr = nv40_instobj_addr,
	.acquire = nv40_instobj_acquire,
	.release = nv40_instobj_release,
};
0118
/* Allocate a new instance object from the PRAMIN heap.
 *
 * Note: *pmemory is assigned before anything that can fail, so on error
 * the caller can (and is expected to) release the partially-constructed
 * object via the normal unref path, which ends up in nv40_instobj_dtor().
 */
static int
nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
		 struct nvkm_memory **pmemory)
{
	struct nv40_instmem *imem = nv40_instmem(base);
	struct nv40_instobj *iobj;
	int ret;

	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
		return -ENOMEM;
	*pmemory = &iobj->base.memory;

	nvkm_instobj_ctor(&nv40_instobj_func, &imem->base, &iobj->base);
	iobj->base.memory.ptrs = &nv40_instobj_ptrs;
	iobj->imem = imem;

	/* Heap access is serialised by the instmem mutex; an align of 0
	 * means "no alignment requirement", which nvkm_mm expresses as 1.
	 */
	mutex_lock(&imem->base.mutex);
	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node);
	mutex_unlock(&imem->base.mutex);
	return ret;
}
0140
0141
0142
0143
0144
0145 static u32
0146 nv40_instmem_rd32(struct nvkm_instmem *base, u32 addr)
0147 {
0148 return ioread32_native(nv40_instmem(base)->iomem + addr);
0149 }
0150
0151 static void
0152 nv40_instmem_wr32(struct nvkm_instmem *base, u32 addr, u32 data)
0153 {
0154 iowrite32_native(data, nv40_instmem(base)->iomem + addr);
0155 }
0156
/* One-time setup: size the reserved PRAMIN region, initialise the heap
 * allocator over it, and carve out the fixed low-memory structures
 * (vbios shadow, RAMHT, RAMRO, RAMFC) in order from offset 0.
 */
static int
nv40_instmem_oneinit(struct nvkm_instmem *base)
{
	struct nv40_instmem *imem = nv40_instmem(base);
	struct nvkm_device *device = imem->base.subdev.device;
	int ret, vs;

	/* Reserve enough space to fit a graphics context for every
	 * channel.  vs is the number of enabled vertex-shader-ish units
	 * read from 0x001540; the per-unit context size constants are
	 * chipset-dependent (NOTE(review): magics presumably mirror the
	 * context sizes in engine/gr/nv40.c — confirm there).
	 */
	vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
	if (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
	else if (device->chipset < 0x43) imem->base.reserved = 0x4f00 * vs;
	else if (nv44_gr_class(device)) imem->base.reserved = 0x4980 * vs;
	else imem->base.reserved = 0x4a40 * vs;
	imem->base.reserved += 16 * 1024;
	imem->base.reserved *= 32;		/* per-channel */
	imem->base.reserved += 512 * 1024;	/* NOTE(review): extra slack, purpose not visible here */
	imem->base.reserved += 512 * 1024;	/* object storage */
	imem->base.reserved = round_up(imem->base.reserved, 4096);

	ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
	if (ret)
		return ret;

	/* 0x00000-0x10000: reserve for probable vbios image */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
			      &imem->base.vbios);
	if (ret)
		return ret;

	/* 0x10000-0x18000: reserve for RAMHT */
	ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
	if (ret)
		return ret;

	/* 0x18000-0x20000: reserve for RAMRO */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x08000, 0, false,
			      &imem->base.ramro);
	if (ret)
		return ret;

	/* 0x20000-0x40000: reserve for RAMFC (zero-filled, so stale
	 * channel state can't leak into new contexts)
	 */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x20000, 0, true,
			      &imem->base.ramfc);
	if (ret)
		return ret;

	return 0;
}
0212
/* Tear everything down in reverse order of nv40_instmem_oneinit(), then
 * drop the BAR mapping created in nv40_instmem_new().  Returns the
 * container so the core can kfree() it.
 */
static void *
nv40_instmem_dtor(struct nvkm_instmem *base)
{
	struct nv40_instmem *imem = nv40_instmem(base);
	nvkm_memory_unref(&imem->base.ramfc);
	nvkm_memory_unref(&imem->base.ramro);
	nvkm_ramht_del(&imem->base.ramht);
	nvkm_memory_unref(&imem->base.vbios);
	nvkm_mm_fini(&imem->heap);
	/* iomem may be NULL if ioremap_wc() failed during construction. */
	if (imem->iomem)
		iounmap(imem->iomem);
	return imem;
}
0226
/* nvkm_instmem vtable for NV40-family GPUs.  .zero = false: instobjs
 * are not zero-filled on allocation by default (callers pass 'zero'
 * explicitly where it matters, e.g. RAMFC in oneinit).
 */
static const struct nvkm_instmem_func
nv40_instmem = {
	.dtor = nv40_instmem_dtor,
	.oneinit = nv40_instmem_oneinit,
	.rd32 = nv40_instmem_rd32,
	.wr32 = nv40_instmem_wr32,
	.memory_new = nv40_instobj_new,
	.zero = false,
};
0236
/* Construct the NV40 instmem subdev and map the PRAMIN BAR.
 *
 * *pimem is assigned before any fallible step after allocation, so on
 * error the caller releases the subdev through the normal path, which
 * reaches nv40_instmem_dtor() (hence its NULL-iomem tolerance).
 */
int
nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		 struct nvkm_instmem **pimem)
{
	struct nv40_instmem *imem;
	int bar;

	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_instmem_ctor(&nv40_instmem, device, type, inst, &imem->base);
	*pimem = &imem->base;

	/* Map PRAMIN BAR: prefer resource 2, fall back to 3 when 2 is
	 * absent (NOTE(review): presumably board-dependent BAR layout —
	 * confirm against the device's resource map).
	 */
	if (device->func->resource_size(device, 2))
		bar = 2;
	else
		bar = 3;

	imem->iomem = ioremap_wc(device->func->resource_addr(device, bar),
				 device->func->resource_size(device, bar));
	if (!imem->iomem) {
		nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
		return -EFAULT;
	}

	return 0;
}