0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #include "priv.h"
0025 #include "fuc/gt215.fuc3.h"
0026
0027 #include <subdev/timer.h>
0028
/* Submit a four-word message (process, message, data0, data1) to the
 * PMU firmware's command FIFO.
 *
 * If @reply is non-NULL the call is synchronous: the expected
 * process/message pair is recorded for gt215_pmu_recv(), and we sleep
 * until it posts the two reply words, which are copied into @reply.
 *
 * Returns 0 on success, -EBUSY if no FIFO slot frees up within 2ms.
 */
int
gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
	       u32 process, u32 message, u32 data0, u32 data1)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 addr;

	mutex_lock(&pmu->send.mutex);

	/* wait for a free slot in the command fifo; 0x10a4a0/0x10a4b0 are
	 * the cmdq head/tail registers (see gt215_pmu_flcn.cmdq).  The
	 * fifo presumably counts as full while tail == (head ^ 8) — TODO
	 * confirm queue depth against falcon docs.
	 */
	addr = nvkm_rd32(device, 0x10a4a0);
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x10a4b0);
		if (tmp != (addr ^ 8))
			break;
	) < 0) {
		mutex_unlock(&pmu->send.mutex);
		return -EBUSY;
	}

	/* only a single process at a time can wait on a synchronous
	 * reply (we hold send.mutex); tell the receive handler what
	 * we're waiting for
	 */
	if (reply) {
		pmu->recv.message = message;
		pmu->recv.process = process;
	}

	/* acquire data segment access: 0x10a580 acts as a hardware
	 * token — keep writing our value until it sticks
	 */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000001);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);

	/* write the packet into the fifo slot through the DMEM window
	 * (0x10a1c0 selects the address, 0x10a1c4 auto-increments),
	 * then bump the head pointer
	 */
	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
				pmu->send.base));
	nvkm_wr32(device, 0x10a1c4, process);
	nvkm_wr32(device, 0x10a1c4, message);
	nvkm_wr32(device, 0x10a1c4, data0);
	nvkm_wr32(device, 0x10a1c4, data1);
	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wait for reply, if requested; gt215_pmu_recv() clears
	 * recv.process and fills recv.data[] before waking us
	 */
	if (reply) {
		wait_event(pmu->recv.wait, (pmu->recv.process == 0));
		reply[0] = pmu->recv.data[0];
		reply[1] = pmu->recv.data[1];
	}

	mutex_unlock(&pmu->send.mutex);
	return 0;
}
0085
/* Drain one message from the PMU firmware's message queue.
 *
 * If a process is waiting on a synchronous reply (see gt215_pmu_send())
 * and the message matches, the payload is handed over and the waiter is
 * woken; otherwise the message is logged as unexpected.
 */
void
gt215_pmu_recv(struct nvkm_pmu *pmu)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 process, message, data0, data1;

	/* nothing to do if GET == PUT (0x10a4c8/0x10a4cc are the msgq
	 * registers, see gt215_pmu_flcn.msgq)
	 */
	u32 addr = nvkm_rd32(device, 0x10a4cc);
	if (addr == nvkm_rd32(device, 0x10a4c8))
		return;

	/* acquire data segment access: 0x10a580 acts as a hardware
	 * token — keep writing our value until it sticks
	 */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000002);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);

	/* read the packet through the DMEM window (0x10a1c0 selects the
	 * address, 0x10a1c4 auto-increments), then advance GET
	 */
	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
				pmu->recv.base));
	process = nvkm_rd32(device, 0x10a1c4);
	message = nvkm_rd32(device, 0x10a1c4);
	data0 = nvkm_rd32(device, 0x10a1c4);
	data1 = nvkm_rd32(device, 0x10a1c4);
	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wake process if it's waiting on a synchronous reply */
	if (pmu->recv.process) {
		if (process == pmu->recv.process &&
		    message == pmu->recv.message) {
			pmu->recv.data[0] = data0;
			pmu->recv.data[1] = data1;
			pmu->recv.process = 0;
			wake_up(&pmu->recv.wait);
			return;
		}
	}

	/* no other responses are expected from the firmware, so treat
	 * anything else as an error; the process id doubles as a
	 * four-character tag, printed first
	 */
	nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
		  (char)((process & 0x000000ff) >> 0),
		  (char)((process & 0x0000ff00) >> 8),
		  (char)((process & 0x00ff0000) >> 16),
		  (char)((process & 0xff000000) >> 24),
		  process, message, data0, data1);
}
0137
/* PMU interrupt handler: decode, service, and acknowledge each pending
 * interrupt source, warning about any that remain unhandled.
 */
void
gt215_pmu_intr(struct nvkm_pmu *pmu)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	/* mask pending (0x10a008) by enabled/routing (0x10a01c);
	 * NOTE(review): the high half of 0x10a01c appears to route
	 * sources away from the host — confirm against falcon docs
	 */
	u32 disp = nvkm_rd32(device, 0x10a01c);
	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);

	/* bit 5: UAS fault — report and clear the fault status */
	if (intr & 0x00000020) {
		u32 stat = nvkm_rd32(device, 0x10a16c);
		if (stat & 0x80000000) {
			nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
				   stat & 0x00ffffff,
				   nvkm_rd32(device, 0x10a168));
			nvkm_wr32(device, 0x10a16c, 0x00000000);
			intr &= ~0x00000020;
		}
	}

	/* bit 6: message pending — punt queue processing to the
	 * recv workqueue, ack the interrupt
	 */
	if (intr & 0x00000040) {
		schedule_work(&pmu->recv.work);
		nvkm_wr32(device, 0x10a004, 0x00000040);
		intr &= ~0x00000040;
	}

	/* bit 7: firmware-requested register write notification */
	if (intr & 0x00000080) {
		nvkm_info(subdev, "wr32 %06x %08x\n",
			  nvkm_rd32(device, 0x10a7a0),
			  nvkm_rd32(device, 0x10a7a4));
		nvkm_wr32(device, 0x10a004, 0x00000080);
		intr &= ~0x00000080;
	}

	/* anything left is unexpected; log and ack so we don't storm */
	if (intr) {
		nvkm_error(subdev, "intr %08x\n", intr);
		nvkm_wr32(device, 0x10a004, intr);
	}
}
0176
0177 void
0178 gt215_pmu_fini(struct nvkm_pmu *pmu)
0179 {
0180 nvkm_wr32(pmu->subdev.device, 0x10a014, 0x00000060);
0181 }
0182
0183 static void
0184 gt215_pmu_reset(struct nvkm_pmu *pmu)
0185 {
0186 struct nvkm_device *device = pmu->subdev.device;
0187 nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
0188 nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
0189 nvkm_rd32(device, 0x022210);
0190 }
0191
0192 static bool
0193 gt215_pmu_enabled(struct nvkm_pmu *pmu)
0194 {
0195 return nvkm_rd32(pmu->subdev.device, 0x022210) & 0x00000001;
0196 }
0197
/* Boot the PMU: upload the built-in firmware's data and code segments,
 * start the falcon, then wait for the firmware to publish its command
 * and message queue configuration before enabling its interrupts.
 *
 * Returns 0 on success, -EBUSY if the firmware doesn't publish a queue
 * configuration within 2ms.
 */
int
gt215_pmu_init(struct nvkm_pmu *pmu)
{
	struct nvkm_device *device = pmu->subdev.device;
	int i;

	/* upload data segment through the DMEM port (0x01000000
	 * presumably enables auto-increment — each write to 0x10a1c4
	 * stores the next word)
	 */
	nvkm_wr32(device, 0x10a1c0, 0x01000000);
	for (i = 0; i < pmu->func->data.size / 4; i++)
		nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);

	/* upload code segment through the IMEM port, selecting a new
	 * page (0x10a188) every 64 words (256 bytes)
	 */
	nvkm_wr32(device, 0x10a180, 0x01000000);
	for (i = 0; i < pmu->func->code.size / 4; i++) {
		if ((i & 0x3f) == 0)
			nvkm_wr32(device, 0x10a188, i >> 6);
		nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
	}

	/* start it running: clear entry point, kick CPU control */
	nvkm_wr32(device, 0x10a10c, 0x00000000);
	nvkm_wr32(device, 0x10a104, 0x00000000);
	nvkm_wr32(device, 0x10a100, 0x00000002);

	/* wait for valid host->pmu ring configuration; the firmware
	 * publishes the command queue base (low 16 bits) and size
	 * (high 16 bits) in 0x10a4d0
	 */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4d0))
			break;
	) < 0)
		return -EBUSY;
	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;

	/* wait for valid pmu->host ring configuration, same layout
	 * in 0x10a4dc
	 */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4dc))
			break;
	) < 0)
		return -EBUSY;
	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;

	/* enable the interrupt sources handled by gt215_pmu_intr() */
	nvkm_wr32(device, 0x10a010, 0x000000e0);
	return 0;
}
0243
/* Falcon engine description for the gt215 PMU: v1 falcon helpers plus
 * the command/message queue head/tail register offsets (relative to
 * the falcon base) used by gt215_pmu_send() and gt215_pmu_recv().
 */
const struct nvkm_falcon_func
gt215_pmu_flcn = {
	.debug = 0xc08,
	.fbif = 0xe00,
	.load_imem = nvkm_falcon_v1_load_imem,
	.load_dmem = nvkm_falcon_v1_load_dmem,
	.read_dmem = nvkm_falcon_v1_read_dmem,
	.bind_context = nvkm_falcon_v1_bind_context,
	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
	.set_start_addr = nvkm_falcon_v1_set_start_addr,
	.start = nvkm_falcon_v1_start,
	.enable = nvkm_falcon_v1_enable,
	.disable = nvkm_falcon_v1_disable,
	.cmdq = { 0x4a0, 0x4b0, 4 },
	.msgq = { 0x4c8, 0x4cc, 0 },
};
0261
/* gt215 PMU implementation: the built-in fuc3 firmware image (from
 * fuc/gt215.fuc3.h) plus the hardware ops defined above.
 */
static const struct nvkm_pmu_func
gt215_pmu = {
	.flcn = &gt215_pmu_flcn,
	.code.data = gt215_pmu_code,
	.code.size = sizeof(gt215_pmu_code),
	.data.data = gt215_pmu_data,
	.data.size = sizeof(gt215_pmu_data),
	.enabled = gt215_pmu_enabled,
	.reset = gt215_pmu_reset,
	.init = gt215_pmu_init,
	.fini = gt215_pmu_fini,
	.intr = gt215_pmu_intr,
	.send = gt215_pmu_send,
	.recv = gt215_pmu_recv,
};
0277
/* Firmware interface table: version -1 means no external firmware is
 * loaded (gf100_pmu_nofw) — the built-in gt215_pmu image is used.
 */
static const struct nvkm_pmu_fwif
gt215_pmu_fwif[] = {
	{ -1, gf100_pmu_nofw, &gt215_pmu },
	{}
};
0283
/* Constructor: instantiate a PMU subdev using the gt215 firmware
 * interface table.  Returns 0 on success or a negative errno.
 */
int
gt215_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_pmu **ppmu)
{
	return nvkm_pmu_new_(gt215_pmu_fwif, device, type, inst, ppmu);
}