#include "gf100.h"
#include "ram.h"

#include <core/memory.h>
#include <core/option.h>
#include <subdev/therm.h>
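
/*
 * Top-level FB interrupt handler: read the master interrupt status
 * register (0x000100) and report any pending PFFB/PBFB interrupts at
 * debug level.
 */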
void
gf100_fb_intr(struct nvkm_fb *base)
{
	struct gf100_fb *fb = gf100_fb(base);
	struct nvkm_subdev *subdev = &fb->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x000100);
	if (intr & 0x08000000)
		nvkm_debug(subdev, "PFFB intr\n");
	if (intr & 0x00002000)
		nvkm_debug(subdev, "PBFB intr\n");
}
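
/*
 * One-time setup: allocate the MMU debug read/write buffers, sized from the
 * big-page setting (or the "MmuDebugBufferSize" config option, with a 4KiB
 * floor), plus a zeroed scratch page that is DMA-mapped so its bus address
 * can be programmed into 0x100c10 at init time.
 */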
int
gf100_fb_oneinit(struct nvkm_fb *base)
{
	struct gf100_fb *fb = gf100_fb(base);
	struct nvkm_device *device = fb->base.subdev.device;
	int ret, size = 1 << (fb->base.page ? fb->base.page : 17);

	size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
	size = max(size, 0x1000);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
			      true, &fb->base.mmu_rd);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
			      true, &fb->base.mmu_wr);
	if (ret)
		return ret;

	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (fb->r100c10_page) {
		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
					   PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(device->dev, fb->r100c10))
			return -EFAULT;
	}

	return 0;
}
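
/*
 * Select the big-page size: bit 0 of 0x100c80 is set for 64KiB pages
 * (fb->page == 16) and cleared for 128KiB pages (fb->page == 17).
 */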
int
gf100_fb_init_page(struct nvkm_fb *fb)
{
	struct nvkm_device *device = fb->subdev.device;
	switch (fb->page) {
	case 16: nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001); break;
	case 17: nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); break;
	default:
		return -EINVAL;
	}
	return 0;
}
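
/*
 * Hardware init: point 0x100c10 at the scratch page (bus address in
 * 256-byte units) and, where the chip provides a clock-gating pack, hand
 * it to the therm subdev.
 */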
void
gf100_fb_init(struct nvkm_fb *base)
{
	struct gf100_fb *fb = gf100_fb(base);
	struct nvkm_device *device = fb->base.subdev.device;

	if (fb->r100c10_page)
		nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);

	if (base->func->clkgate_pack) {
		nvkm_therm_clkgate_init(device->therm,
					base->func->clkgate_pack);
	}
}
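
/*
 * Destructor: unmap and free the scratch page, then return the gf100_fb
 * container so the caller can free it.
 */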
void *
gf100_fb_dtor(struct nvkm_fb *base)
{
	struct gf100_fb *fb = gf100_fb(base);
	struct nvkm_device *device = fb->base.subdev.device;

	if (fb->r100c10_page) {
		dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(fb->r100c10_page);
	}

	return fb;
}
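
/*
 * Common constructor: allocate the gf100_fb wrapper and construct the
 * base nvkm_fb with the supplied function table.
 */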
int
gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
	struct gf100_fb *fb;

	if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_fb_ctor(func, device, type, inst, &fb->base);
	*pfb = &fb->base;

	return 0;
}
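
/*
 * GF100 nvkm_fb function table; default_bigpage = 17 selects 128KiB big
 * pages by default.
 */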
static const struct nvkm_fb_func
gf100_fb = {
	.dtor = gf100_fb_dtor,
	.oneinit = gf100_fb_oneinit,
	.init = gf100_fb_init,
	.init_page = gf100_fb_init_page,
	.intr = gf100_fb_intr,
	.ram_new = gf100_ram_new,
	.default_bigpage = 17,
};

int
gf100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
	return gf100_fb_new_(&gf100_fb, device, type, inst, pfb);
}