#include "priv.h"
#include "ram.h"

#include <core/memory.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/M0203.h>
#include <engine/gr.h>
#include <engine/mpeg.h>

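/* Wrappers around the chipset-specific tile-region hooks.  A tile region
 * describes an addr/size/pitch aperture in VRAM; programming one also
 * notifies the GR and MPEG engines, when present, so they can mirror the
 * new configuration.
 */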
void
nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
{
	fb->func->tile.fini(fb, region, tile);
}

void
nvkm_fb_tile_init(struct nvkm_fb *fb, int region, u32 addr, u32 size,
		  u32 pitch, u32 flags, struct nvkm_fb_tile *tile)
{
	fb->func->tile.init(fb, region, addr, size, pitch, flags, tile);
}

void
nvkm_fb_tile_prog(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
{
	struct nvkm_device *device = fb->subdev.device;
	if (fb->func->tile.prog) {
		fb->func->tile.prog(fb, region, tile);
		if (device->gr)
			nvkm_engine_tile(&device->gr->engine, region);
		if (device->mpeg)
			nvkm_engine_tile(device->mpeg, region);
	}
}

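/* Look up the board's VRAM type in the VBIOS M0203 table, indexed by the
 * RAMCFG strap value.  Returns an NVKM_RAM_TYPE_* value, or
 * NVKM_RAM_TYPE_UNKNOWN if the strap has no matching table entry.
 */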
int
nvkm_fb_bios_memtype(struct nvkm_bios *bios)
{
	struct nvkm_subdev *subdev = &bios->subdev;
	struct nvkm_device *device = subdev->device;
	const u8 ramcfg = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
	struct nvbios_M0203E M0203E;
	u8 ver, hdr;

	if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
		switch (M0203E.type) {
		case M0203E_TYPE_DDR2  : return NVKM_RAM_TYPE_DDR2;
		case M0203E_TYPE_DDR3  : return NVKM_RAM_TYPE_DDR3;
		case M0203E_TYPE_GDDR3 : return NVKM_RAM_TYPE_GDDR3;
		case M0203E_TYPE_GDDR5 : return NVKM_RAM_TYPE_GDDR5;
		case M0203E_TYPE_GDDR5X: return NVKM_RAM_TYPE_GDDR5X;
		case M0203E_TYPE_GDDR6 : return NVKM_RAM_TYPE_GDDR6;
		case M0203E_TYPE_HBM2  : return NVKM_RAM_TYPE_HBM2;
		default:
			nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type);
			return NVKM_RAM_TYPE_UNKNOWN;
		}
	}

	nvkm_warn(subdev, "M0203E not matched!\n");
	return NVKM_RAM_TYPE_UNKNOWN;
}

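/* Forward FB interrupts to the chipset-specific handler, if there is one. */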
static void
nvkm_fb_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	if (fb->func->intr)
		fb->func->intr(fb);
}

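/* One-time setup: detect VRAM via the chipset-specific ram_new() hook, run
 * any chipset-specific oneinit(), then set up the compression tag allocator.
 */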
static int
nvkm_fb_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	u32 tags = 0;

	if (fb->func->ram_new) {
		int ret = fb->func->ram_new(fb, &fb->ram);
		if (ret) {
			nvkm_error(subdev, "vram setup failed, %d\n", ret);
			return ret;
		}
	}

	if (fb->func->oneinit) {
		int ret = fb->func->oneinit(fb);
		if (ret)
			return ret;
	}

	/* Initialise the compression tag allocator with however many tags
	 * the chipset-specific hook reports, if any.
	 */
	if (fb->func->tags) {
		tags = fb->func->tags(fb);
		nvkm_debug(subdev, "%d comptags\n", tags);
	}

	return nvkm_mm_init(&fb->tags.mm, 0, 0, tags, 1);
}

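/* VPR (video protected region) scrubbing: if the region is reported as
 * locked at init time, run the scrubber binary to clear it, and fail if it
 * is still locked afterwards.
 */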
static int
nvkm_fb_init_scrub_vpr(struct nvkm_fb *fb)
{
	struct nvkm_subdev *subdev = &fb->subdev;
	int ret;

	nvkm_debug(subdev, "VPR locked, running scrubber binary\n");

	if (!fb->vpr_scrubber.size) {
		nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n");
		return 0;
	}

	ret = fb->func->vpr.scrub(fb);
	if (ret) {
		nvkm_error(subdev, "VPR scrubber binary failed\n");
		return ret;
	}

	if (fb->func->vpr.scrub_required(fb)) {
		nvkm_error(subdev, "VPR still locked after scrub!\n");
		return -EIO;
	}

	nvkm_debug(subdev, "VPR scrubber binary successful\n");
	return 0;
}

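/* Subdev init: initialise the RAM object, reprogram all tile regions, call
 * the optional chipset hooks, and scrub the VPR if required.
 */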
static int
nvkm_fb_init(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	int ret, i;

	if (fb->ram) {
		ret = nvkm_ram_init(fb->ram);
		if (ret)
			return ret;
	}

	for (i = 0; i < fb->tile.regions; i++)
		fb->func->tile.prog(fb, i, &fb->tile.region[i]);

	if (fb->func->init)
		fb->func->init(fb);

	if (fb->func->init_remapper)
		fb->func->init_remapper(fb);

	if (fb->func->init_page) {
		ret = fb->func->init_page(fb);
		if (WARN_ON(ret))
			return ret;
	}

	if (fb->func->init_unkn)
		fb->func->init_unkn(fb);

	if (fb->func->vpr.scrub_required &&
	    fb->func->vpr.scrub_required(fb)) {
		ret = nvkm_fb_init_scrub_vpr(fb);
		if (ret)
			return ret;
	}

	return 0;
}

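/* Teardown: release the mmu_wr/mmu_rd memory, tile regions, compression tag
 * allocator, RAM object and VPR scrubber blob, then hand off to the
 * chipset-specific destructor, if there is one.
 */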
static void *
nvkm_fb_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	int i;

	nvkm_memory_unref(&fb->mmu_wr);
	nvkm_memory_unref(&fb->mmu_rd);

	for (i = 0; i < fb->tile.regions; i++)
		fb->func->tile.fini(fb, i, &fb->tile.region[i]);

	nvkm_mm_fini(&fb->tags.mm);
	mutex_destroy(&fb->tags.mutex);

	nvkm_ram_del(&fb->ram);

	nvkm_blob_dtor(&fb->vpr_scrubber);

	if (fb->func->dtor)
		return fb->func->dtor(fb);
	return fb;
}

static const struct nvkm_subdev_func
nvkm_fb = {
	.dtor = nvkm_fb_dtor,
	.oneinit = nvkm_fb_oneinit,
	.init = nvkm_fb_init,
	.intr = nvkm_fb_intr,
};

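/* Common constructor.  The big-page size defaults to the chipset value, but
 * may be overridden via the "NvFbBigPage" config option.
 */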
void
nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
	     enum nvkm_subdev_type type, int inst, struct nvkm_fb *fb)
{
	nvkm_subdev_ctor(&nvkm_fb, device, type, inst, &fb->subdev);
	fb->func = func;
	fb->tile.regions = fb->func->tile.regions;
	fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", fb->func->default_bigpage);
	mutex_init(&fb->tags.mutex);
}

int
nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
	     enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
	if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_fb_ctor(func, device, type, inst, *pfb);
	return 0;
}