0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #define gk104_clk(p) container_of((p), struct gk104_clk, base)
0025 #include "priv.h"
0026 #include "pll.h"
0027
0028 #include <subdev/timer.h>
0029 #include <subdev/bios.h>
0030 #include <subdev/bios/pll.h>
0031
/* Pre-computed programming values for one clock index, filled in by
 * calc_clk() and written to hardware by the gk104_clk_prog_*() stages.
 */
struct gk104_clk_info {
	u32 freq;	/* frequency (kHz) the staged programming produces */
	u32 ssel;	/* 0x137100 source-select bit; 0 = divider path */
	u32 mdiv;	/* 0x137250 post-divider value (incl. enable bit 31) */
	u32 dsrc;	/* 0x137160 divider source-select value */
	u32 ddiv;	/* 0x1371d0 divider value (incl. enable bit 31) */
	u32 coef;	/* PLL coefficient word: (P << 16) | (N << 8) | M */
};
0040
/* Private subdev state: eng[] holds the programming staged by calc()
 * for each of the 16 possible clock indices, consumed by prog() and
 * cleared again by tidy().
 */
struct gk104_clk {
	struct nvkm_clk base;
	struct gk104_clk_info eng[16];
};
0045
0046 static u32 read_div(struct gk104_clk *, int, u32, u32);
0047 static u32 read_pll(struct gk104_clk *, u32);
0048
0049 static u32
0050 read_vco(struct gk104_clk *clk, u32 dsrc)
0051 {
0052 struct nvkm_device *device = clk->base.subdev.device;
0053 u32 ssrc = nvkm_rd32(device, dsrc);
0054 if (!(ssrc & 0x00000100))
0055 return read_pll(clk, 0x00e800);
0056 return read_pll(clk, 0x00e820);
0057 }
0058
/* Decode the current output frequency (kHz) of the PLL at register
 * offset @pll.
 *
 * Reads the PLL's control and coefficient registers, determines the
 * appropriate reference clock for that particular PLL, and returns
 * ref * N / (M * P), including a fractional-N contribution for the
 * 0x132020 PLL.  Returns 0 when the PLL is disabled or unrecognised.
 */
static u32
read_pll(struct gk104_clk *clk, u32 pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, pll + 0x00);
	u32 coef = nvkm_rd32(device, pll + 0x04);
	u32 P = (coef & 0x003f0000) >> 16;
	u32 N = (coef & 0x0000ff00) >> 8;
	u32 M = (coef & 0x000000ff) >> 0;
	u32 sclk;
	/* 0xf000 is the neutral fractional-N value: (u16)(0xf000 + 4096)
	 * wraps to 0, so PLLs without an fN register get no contribution.
	 */
	u16 fN = 0xf000;

	/* PLL not enabled */
	if (!(ctrl & 0x00000001))
		return 0;

	switch (pll) {
	case 0x00e800:
	case 0x00e820:
		/* reference PLLs run straight off the crystal, no post-div */
		sclk = device->crystal;
		P = 1;
		break;
	case 0x132000:
		/* fed by the 0x132020 PLL; bit 28 selects a /2 post-div */
		sclk = read_pll(clk, 0x132020);
		P = (coef & 0x10000000) ? 2 : 1;
		break;
	case 0x132020:
		/* reference from its own divider; fractional-N at +0x10 */
		sclk = read_div(clk, 0, 0x137320, 0x137330);
		fN = nvkm_rd32(device, pll + 0x10) >> 16;
		break;
	case 0x137000:
	case 0x137020:
	case 0x137040:
	case 0x1370e0:
		/* per-clock PLLs: reference from the matching input divider */
		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
		break;
	default:
		return 0;
	}

	/* guard the division below against a zero post-divider field */
	if (P == 0)
		P = 1;

	sclk = (sclk * N) + (((u16)(fN + 4096) * sclk) >> 13);
	/* NOTE(review): assumes hardware never reports M == 0 here; a zero
	 * M would divide by zero -- confirm against PLL limits.
	 */
	return sclk / (M * P);
}
0104
/* Decode the frequency (kHz) produced by divider @doff.
 *
 * @dsrc: base address of the divider source-select registers
 * @dctl: base address of the divider control registers
 *
 * The low two bits of the source register select the input:
 *   0: crystal, or 108MHz when bits 16-17 are both set
 *   2: 100MHz reference
 *   3: VCO, scaled by the control register's divider when bit 31 set
 * The unhandled encoding (1) falls through to the default and returns 0.
 */
static u32
read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
	u32 sctl = nvkm_rd32(device, dctl + (doff * 4));

	switch (ssrc & 0x00000003) {
	case 0:
		if ((ssrc & 0x00030000) != 0x00030000)
			return device->crystal;
		return 108000;
	case 2:
		return 100000;
	case 3:
		if (sctl & 0x80000000) {
			u32 sclk = read_vco(clk, dsrc + (doff * 4));
			/* divider field encodes (value + 2) halves */
			u32 sdiv = (sctl & 0x0000003f) + 2;
			return (sclk * 2) / sdiv;
		}

		return read_vco(clk, dsrc + (doff * 4));
	default:
		return 0;
	}
}
0131
0132 static u32
0133 read_mem(struct gk104_clk *clk)
0134 {
0135 struct nvkm_device *device = clk->base.subdev.device;
0136 switch (nvkm_rd32(device, 0x1373f4) & 0x0000000f) {
0137 case 1: return read_pll(clk, 0x132020);
0138 case 2: return read_pll(clk, 0x132000);
0139 default:
0140 return 0;
0141 }
0142 }
0143
/* Decode the current frequency (kHz) of clock index @idx.
 *
 * Clocks 0-6 run either from their own PLL (selected by the matching
 * bit in 0x137100) or from the 0x137160/0x1371d0 divider path.
 * Clocks 7+ have no 0x137100 bit; bits in their 0x137160 register
 * choose between the divider output and the 0x1370e0 PLL.  The result
 * is then scaled by the 0x137250 post-divider when its enable bit
 * (bit 31) is set; which 6-bit field applies depends on whether a PLL
 * (sdiv = 1) or divider (sdiv = 0) source was in use.
 */
static u32
read_clk(struct gk104_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
	u32 sclk, sdiv;

	if (idx < 7) {
		u32 ssel = nvkm_rd32(device, 0x137100);
		if (ssel & (1 << idx)) {
			/* clock is running from its own PLL */
			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
			sdiv = 1;
		} else {
			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
			sdiv = 0;
		}
	} else {
		u32 ssrc = nvkm_rd32(device, 0x137160 + (idx * 0x04));
		if ((ssrc & 0x00000003) == 0x00000003) {
			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
			if (ssrc & 0x00000100) {
				/* alternate source path; bit 30 selects the
				 * 0x1370e0 PLL over the divider output
				 */
				if (ssrc & 0x40000000)
					sclk = read_pll(clk, 0x1370e0);
				sdiv = 1;
			} else {
				sdiv = 0;
			}
		} else {
			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
			sdiv = 0;
		}
	}

	if (sctl & 0x80000000) {
		/* post-divider enabled: field encodes (value + 2) halves */
		if (sdiv)
			sdiv = ((sctl & 0x00003f00) >> 8) + 2;
		else
			sdiv = ((sctl & 0x0000003f) >> 0) + 2;
		return (sclk * 2) / sdiv;
	}

	return sclk;
}
0187
/* .read hook: report the current frequency (kHz) of clock source @src.
 *
 * Fixed sources (crystal, href) are returned directly; the memory
 * clock is decoded via read_mem(), and every other domain maps to a
 * fixed clock index decoded via read_clk().  Unknown sources return
 * -EINVAL.
 */
static int
gk104_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gk104_clk *clk = gk104_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000;
	case nv_clk_src_mem:
		return read_mem(clk);
	case nv_clk_src_gpc:
		return read_clk(clk, 0x00);
	case nv_clk_src_rop:
		return read_clk(clk, 0x01);
	case nv_clk_src_hubk07:
		return read_clk(clk, 0x02);
	case nv_clk_src_hubk06:
		return read_clk(clk, 0x07);
	case nv_clk_src_hubk01:
		return read_clk(clk, 0x08);
	case nv_clk_src_pmu:
		return read_clk(clk, 0x0c);
	case nv_clk_src_vdec:
		return read_clk(clk, 0x0e);
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
}
0221
0222 static u32
0223 calc_div(struct gk104_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
0224 {
0225 u32 div = min((ref * 2) / freq, (u32)65);
0226 if (div < 2)
0227 div = 2;
0228
0229 *ddiv = div - 2;
0230 return (ref * 2) / div;
0231 }
0232
/* Choose a divider input source capable of producing @freq (kHz).
 *
 * For the fixed rates (27MHz/108MHz crystal modes, 100MHz reference)
 * the matching source encoding is stored in *dsrc and @freq returned
 * unchanged.  Otherwise source 3 (VCO) is selected and, for clocks
 * 0-6 which have a divider, calc_div() sets *ddiv to approach @freq.
 * Returns the frequency the chosen setup will actually produce.
 */
static u32
calc_src(struct gk104_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
{
	u32 sclk;

	/* check if the requested frequency matches a fixed source exactly */
	*ddiv = 0x00000000;
	switch (freq) {
	case 27000:
	case 108000:
		/* crystal input; 0x00030000 selects the 108MHz mode */
		*dsrc = 0x00000000;
		if (freq == 108000)
			*dsrc |= 0x00030000;
		return freq;
	case 100000:
		*dsrc = 0x00000002;
		return freq;
	default:
		*dsrc = 0x00000003;
		break;
	}

	/* otherwise, divide the VCO down as close as we can get */
	sclk = read_vco(clk, 0x137160 + (idx * 4));
	if (idx < 7)
		sclk = calc_div(clk, idx, sclk, freq, ddiv);
	return sclk;
}
0261
/* Compute PLL coefficients to reach @freq (kHz) on clock @idx's PLL.
 *
 * Parses the PLL limits for register 0x137000 + idx*0x20 from the
 * VBIOS, reads the PLL's current reference clock from its input
 * divider, and asks gt215_pll_calc() for N/M/P.  On success the
 * packed coefficient word (P << 16 | N << 8 | M) is stored in *coef
 * and the achievable frequency returned; any failure returns 0.
 */
static u32
calc_pll(struct gk104_clk *clk, int idx, u32 freq, u32 *coef)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvbios_pll limits;
	int N, M, P, ret;

	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
	if (ret)
		return 0;

	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
	if (!limits.refclk)
		return 0;

	ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
	if (ret <= 0)
		return 0;

	*coef = (P << 16) | (N << 8) | M;
	return ret;
}
0285
/* Calculate programming for clock @idx to reach cstate->domain[dom].
 *
 * Tries a divider-only setup first and, for clocks that have a PLL
 * path available (mask 0x0000ff87), a PLL-based setup as well;
 * whichever lands closer to the target frequency is staged into
 * clk->eng[idx] for gk104_clk_prog() to apply.  A zero target means
 * "leave this clock alone" and succeeds without staging anything.
 *
 * NOTE(review): the |= updates below rely on clk->eng[] having been
 * zeroed beforehand (gk104_clk_tidy() memsets it); stale bits would
 * otherwise survive between calculations.
 */
static int
calc_clk(struct gk104_clk *clk,
	 struct nvkm_cstate *cstate, int idx, int dom)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	u32 freq = cstate->domain[dom];
	u32 src0, div0, div1D, div1P = 0;
	u32 clk0, clk1 = 0;

	/* no target frequency for this domain */
	if (!freq)
		return 0;

	/* first possible path: source + divider only */
	clk0 = calc_src(clk, idx, freq, &src0, &div0);
	clk0 = calc_div(clk, idx, clk0, freq, &div1D);

	/* see if we can get closer via a PLL (clocks > 7 borrow hubk06) */
	if (clk0 != freq && (0x0000ff87 & (1 << idx))) {
		if (idx <= 7)
			clk1 = calc_pll(clk, idx, freq, &info->coef);
		else
			clk1 = cstate->domain[nv_clk_src_hubk06];
		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
	}

	/* select whichever method gets closest to the target */
	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
		info->dsrc = src0;
		if (div0) {
			info->ddiv |= 0x80000000;	/* divider enable */
			info->ddiv |= div0;
		}
		if (div1D) {
			info->mdiv |= 0x80000000;	/* post-div enable */
			info->mdiv |= div1D;
		}
		info->ssel = 0;
		info->freq = clk0;
	} else {
		if (div1P) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1P << 8;	/* PLL-path field */
		}
		info->ssel = (1 << idx);
		info->dsrc = 0x40000100;
		info->freq = clk1;
	}

	return 0;
}
0337
0338 static int
0339 gk104_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
0340 {
0341 struct gk104_clk *clk = gk104_clk(base);
0342 int ret;
0343
0344 if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
0345 (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
0346 (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
0347 (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
0348 (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
0349 (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_pmu)) ||
0350 (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
0351 return ret;
0352
0353 return 0;
0354 }
0355
/* Stage 0: for clocks staying on the divider path (ssel == 0), program
 * the divider value and its source while the clock may still be on its
 * old configuration.
 */
static void
gk104_clk_prog_0(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (!info->ssel) {
		nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
		nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
	}
}
0366
/* Stage 1 (clocks 0-6): deselect the PLL in 0x137100 so the clock runs
 * from the divider path, then wait up to 2ms for the bit to read back
 * clear before the PLL is touched.
 */
static void
gk104_clk_prog_1_0(struct gk104_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
			break;
	);
}
0377
/* Stage 1 (clocks 7+): clear the alternate-source enable (bit 8) in the
 * clock's 0x137160 register before the PLL is reprogrammed.
 */
static void
gk104_clk_prog_1_1(struct gk104_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
}
0384
/* Stage 2 (clocks 0-7): reprogram the clock's PLL.
 *
 * The PLL is first unhooked (bit 2) and disabled (bit 0).  If new
 * coefficients were staged, they are written, the PLL re-enabled, and
 * we wait up to 2ms for bit 17 to assert (lock indication) with bit 4
 * toggled around the wait, before hooking the PLL back up.
 */
static void
gk104_clk_prog_2(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 addr = 0x137000 + (idx * 0x20);
	nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
	nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
	if (info->coef) {
		nvkm_wr32(device, addr + 0x04, info->coef);
		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);

		/* wait for the PLL to lock (bit 17) */
		nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
		nvkm_msec(device, 2000,
			if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
				break;
		);
		nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);

		/* hook the PLL output back up */
		nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
	}
}
0409
0410 static void
0411 gk104_clk_prog_3(struct gk104_clk *clk, int idx)
0412 {
0413 struct gk104_clk_info *info = &clk->eng[idx];
0414 struct nvkm_device *device = clk->base.subdev.device;
0415 if (info->ssel)
0416 nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
0417 else
0418 nvkm_mask(device, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
0419 }
0420
/* Stage 4 (clocks 0-6): if a PLL setup was chosen, switch the clock
 * onto its PLL via 0x137100 and wait up to 2ms for the select bit to
 * read back as programmed.
 */
static void
gk104_clk_prog_4_0(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (info->ssel) {
		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
		nvkm_msec(device, 2000,
			u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
			if (tmp == info->ssel)
				break;
		);
	}
}
0435
/* Stage 4 (clocks 7+): if a PLL setup was chosen, select the 0x1370e0
 * PLL (bit 30) and then enable the alternate source path (bit 8) in
 * the clock's 0x137160 register.
 */
static void
gk104_clk_prog_4_1(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (info->ssel) {
		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
	}
}
0446
/* .prog hook: apply the programming staged in clk->eng[].
 *
 * Runs the fixed sequence of stages above; each stage's mask selects
 * which clock indices it applies to (0x007f = clocks 0-6, 0xff80 =
 * clocks 7-15, 0x00ff = the PLL-capable clocks 0-7).  Clocks with no
 * staged frequency (eng[j].freq == 0) are skipped entirely, and every
 * clock completes a stage before any clock starts the next one.
 */
static int
gk104_clk_prog(struct nvkm_clk *base)
{
	struct gk104_clk *clk = gk104_clk(base);
	struct {
		u32 mask;
		void (*exec)(struct gk104_clk *, int);
	} stage[] = {
		{ 0x007f, gk104_clk_prog_0 },   /* divider setup (div path) */
		{ 0x007f, gk104_clk_prog_1_0 }, /* switch 0-6 off their PLLs */
		{ 0xff80, gk104_clk_prog_1_1 }, /* switch 7+ off alt source */
		{ 0x00ff, gk104_clk_prog_2 },   /* reprogram PLLs */
		{ 0xff80, gk104_clk_prog_3 },   /* post-dividers for 7+ */
		{ 0x007f, gk104_clk_prog_4_0 }, /* switch 0-6 back to PLL */
		{ 0xff80, gk104_clk_prog_4_1 }, /* switch 7+ back to PLL */
	};
	int i, j;

	for (i = 0; i < ARRAY_SIZE(stage); i++) {
		for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
			if (!(stage[i].mask & (1 << j)))
				continue;
			if (!clk->eng[j].freq)
				continue;
			stage[i].exec(clk, j);
		}
	}

	return 0;
}
0477
0478 static void
0479 gk104_clk_tidy(struct nvkm_clk *base)
0480 {
0481 struct gk104_clk *clk = gk104_clk(base);
0482 memset(clk->eng, 0x00, sizeof(clk->eng));
0483 }
0484
/* Hook table and clock-domain list for the gk104 clock subdev.
 *
 * NOTE(review): the second field of each domain entry appears to be
 * the VBIOS perflvl domain id (0xff = fixed/unmapped source) -- confirm
 * against the nvkm_clk domain definition.
 */
static const struct nvkm_clk_func
gk104_clk = {
	.read = gk104_clk_read,
	.calc = gk104_clk_calc,
	.prog = gk104_clk_prog,
	.tidy = gk104_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE | NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
		{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
		{ nv_clk_src_rop    , 0x02, NVKM_CLK_DOM_FLAG_CORE },
		{ nv_clk_src_mem    , 0x03, 0, "memory", 500 },
		{ nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
		{ nv_clk_src_hubk01 , 0x05 },
		{ nv_clk_src_vdec   , 0x06 },
		{ nv_clk_src_pmu    , 0x07 },
		{ nv_clk_src_max }
	}
};
0505
0506 int
0507 gk104_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
0508 struct nvkm_clk **pclk)
0509 {
0510 struct gk104_clk *clk;
0511
0512 if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
0513 return -ENOMEM;
0514 *pclk = &clk->base;
0515
0516 return nvkm_clk_ctor(&gk104_clk, device, type, inst, true, &clk->base);
0517 }