#define gf100_clk(p) container_of((p), struct gf100_clk, base)
#include "priv.h"
#include "pll.h"

#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>

struct gf100_clk_info {
	u32 freq; /* selected frequency (kHz), zero if domain untouched */
	u32 ssel; /* bit in 0x137100 selecting the PLL path */
	u32 mdiv; /* final divider, written to 0x137250+ */
	u32 dsrc; /* divider-path source select, written to 0x137160+ */
	u32 ddiv; /* divider-path divider, written to 0x1371d0+ */
	u32 coef; /* PLL coefficients (P<<16 | N<<8 | M), written to 0x137004+ */
};

struct gf100_clk {
	struct nvkm_clk base;
	struct gf100_clk_info eng[16];
};

static u32 read_div(struct gf100_clk *, int, u32, u32);

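/* Determine which of the two system PLLs feeds a divider's VCO input;
 * bit 8 of the source register appears to select between them.
 */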
static u32
read_vco(struct gf100_clk *clk, u32 dsrc)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ssrc = nvkm_rd32(device, dsrc);
	if (!(ssrc & 0x00000100))
		return nvkm_clk_read(&clk->base, nv_clk_src_sppll0);
	return nvkm_clk_read(&clk->base, nv_clk_src_sppll1);
}

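/* Read back the current output frequency (kHz) of one of the PLLs,
 * derived from its reference clock and the N/M/P coefficients.
 */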
static u32
read_pll(struct gf100_clk *clk, u32 pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, pll + 0x00);
	u32 coef = nvkm_rd32(device, pll + 0x04);
	u32 P = (coef & 0x003f0000) >> 16;
	u32 N = (coef & 0x0000ff00) >>  8;
	u32 M = (coef & 0x000000ff) >>  0;
	u32 sclk;

	if (!(ctrl & 0x00000001))
		return 0;

	switch (pll) {
	case 0x00e800:
	case 0x00e820:
		sclk = device->crystal;
		P = 1;
		break;
	case 0x132000:
		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrc);
		break;
	case 0x132020:
		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrcref);
		break;
	case 0x137000:
	case 0x137020:
	case 0x137040:
	case 0x1370e0:
		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
		break;
	default:
		return 0;
	}

	return sclk * N / M / P;
}

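/* Read back the frequency (kHz) produced by one of the divider paths,
 * which may be the crystal, a fixed reference, or a divided VCO.
 */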
static u32
read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
	u32 sclk, sctl, sdiv = 2;

	switch (ssrc & 0x00000003) {
	case 0:
		if ((ssrc & 0x00030000) != 0x00030000)
			return device->crystal;
		return 108000;
	case 2:
		return 100000;
	case 3:
		sclk = read_vco(clk, dsrc + (doff * 4));

		/* only the first few dividers have a divider control */
		if (doff <= 2) {
			sctl = nvkm_rd32(device, dctl + (doff * 4));

			if (sctl & 0x80000000) {
				if (ssrc & 0x100)
					sctl >>= 8;

				sdiv = (sctl & 0x3f) + 2;
			}
		}

		return (sclk * 2) / sdiv;
	default:
		return 0;
	}
}

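/* Read back the current frequency (kHz) of one of the engine clock
 * domains, following whichever path (PLL or divider) is selected.
 */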
static u32
read_clk(struct gf100_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
	u32 ssel = nvkm_rd32(device, 0x137100);
	u32 sclk, sdiv;

	if (ssel & (1 << idx)) {
		if (idx < 7)
			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
		else
			sclk = read_pll(clk, 0x1370e0);
		sdiv = ((sctl & 0x00003f00) >> 8) + 2;
	} else {
		sclk = read_div(clk, idx, 0x137160, 0x1371d0);
		sdiv = ((sctl & 0x0000003f) >> 0) + 2;
	}

	if (sctl & 0x80000000)
		return (sclk * 2) / sdiv;

	return sclk;
}

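/* Top-level read entry point, mapping each nv_clk_src to the register
 * machinery above.
 */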
static int
gf100_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gf100_clk *clk = gf100_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000;
	case nv_clk_src_sppll0:
		return read_pll(clk, 0x00e800);
	case nv_clk_src_sppll1:
		return read_pll(clk, 0x00e820);

	case nv_clk_src_mpllsrcref:
		return read_div(clk, 0, 0x137320, 0x137330);
	case nv_clk_src_mpllsrc:
		return read_pll(clk, 0x132020);
	case nv_clk_src_mpll:
		return read_pll(clk, 0x132000);
	case nv_clk_src_mdiv:
		return read_div(clk, 0, 0x137300, 0x137310);
	case nv_clk_src_mem:
		if (nvkm_rd32(device, 0x1373f0) & 0x00000002)
			return nvkm_clk_read(&clk->base, nv_clk_src_mpll);
		return nvkm_clk_read(&clk->base, nv_clk_src_mdiv);

	case nv_clk_src_gpc:
		return read_clk(clk, 0x00);
	case nv_clk_src_rop:
		return read_clk(clk, 0x01);
	case nv_clk_src_hubk07:
		return read_clk(clk, 0x02);
	case nv_clk_src_hubk06:
		return read_clk(clk, 0x07);
	case nv_clk_src_hubk01:
		return read_clk(clk, 0x08);
	case nv_clk_src_copy:
		return read_clk(clk, 0x09);
	case nv_clk_src_pmu:
		return read_clk(clk, 0x0c);
	case nv_clk_src_vdec:
		return read_clk(clk, 0x0e);
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
}

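/* Calculate the closest achievable frequency to the target using the
 * fractional divider (steps of 0.5, from /1 to /32.5), returning the
 * divider register value through *ddiv.
 */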
static u32
calc_div(struct gf100_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
{
	u32 div = min((ref * 2) / freq, (u32)65);
	if (div < 2)
		div = 2;

	*ddiv = div - 2;
	return (ref * 2) / div;
}

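/* Pick a source for the divider path: one of the fixed references if
 * the target matches exactly, otherwise a divided-down VCO.
 */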
static u32
calc_src(struct gf100_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
{
	u32 sclk;

	/* use one of the fixed frequencies if possible */
	*ddiv = 0x00000000;
	switch (freq) {
	case  27000:
	case 108000:
		*dsrc = 0x00000000;
		if (freq == 108000)
			*dsrc |= 0x00030000;
		return freq;
	case 100000:
		*dsrc = 0x00000002;
		return freq;
	default:
		*dsrc = 0x00000003;
		break;
	}

	/* otherwise, calculate the closest divider */
	sclk = read_vco(clk, 0x137160 + (idx * 4));
	if (idx < 7)
		sclk = calc_div(clk, idx, sclk, freq, ddiv);
	return sclk;
}

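/* Calculate PLL coefficients for the target frequency, within the
 * limits parsed from the VBIOS; returns the achievable frequency, or
 * zero on failure.
 */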
static u32
calc_pll(struct gf100_clk *clk, int idx, u32 freq, u32 *coef)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvbios_pll limits;
	int N, M, P, ret;

	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
	if (ret)
		return 0;

	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
	if (!limits.refclk)
		return 0;

	ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
	if (ret <= 0)
		return 0;

	*coef = (P << 16) | (N << 8) | M;
	return ret;
}

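/* Work out the best way of producing the frequency requested for a
 * single clock domain, trying both the divider-only path and, where
 * the hardware has one, the PLL path.
 */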
static int
calc_clk(struct gf100_clk *clk, struct nvkm_cstate *cstate, int idx, int dom)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	u32 freq = cstate->domain[dom];
	u32 src0, div0, div1D, div1P = 0;
	u32 clk0, clk1 = 0;

	/* invalid clock domain */
	if (!freq)
		return 0;

	/* first possible path, using only dividers */
	clk0 = calc_src(clk, idx, freq, &src0, &div0);
	clk0 = calc_div(clk, idx, clk0, freq, &div1D);

	/* see if we can get any closer using PLLs */
	if (clk0 != freq && (0x00004387 & (1 << idx))) {
		if (idx <= 7)
			clk1 = calc_pll(clk, idx, freq, &info->coef);
		else
			clk1 = cstate->domain[nv_clk_src_hubk06];
		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
	}

	/* select the method which gets closest to the target freq */
	if (abs((int)freq - (int)clk0) <= abs((int)freq - (int)clk1)) {
		info->dsrc = src0;
		if (div0) {
			info->ddiv |= 0x80000000;
			info->ddiv |= div0 << 8;
			info->ddiv |= div0;
		}
		if (div1D) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1D;
		}
		info->ssel = info->coef = 0;
		info->freq = clk0;
	} else {
		if (div1P) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1P << 8;
		}
		info->ssel = (1 << idx);
		info->freq = clk1;
	}

	return 0;
}

static int
gf100_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gf100_clk *clk = gf100_clk(base);
	int ret;

	if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
	    (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
	    (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
	    (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
	    (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
	    (ret = calc_clk(clk, cstate, 0x09, nv_clk_src_copy)) ||
	    (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_pmu)) ||
	    (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
		return ret;

	return 0;
}

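/* Clock programming is split into five stages, executed in order for
 * every domain with a pending change: pre-program the divider path,
 * switch away from the PLL, reprogram and lock the PLL, switch back,
 * then set the final divider.
 *
 * Stage 0: pre-program the divider path while it's not in use.
 */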
static void
gf100_clk_prog_0(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (idx < 7 && !info->ssel) {
		nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
		nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
	}
}

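/* Stage 1: switch the domain onto the divider path, and wait for the
 * selection to take effect.
 */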
static void
gf100_clk_prog_1(struct gf100_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
			break;
	);
}

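/* Stage 2: with the PLL unused, reprogram its coefficients and wait
 * for it to lock before re-enabling its output.
 */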
static void
gf100_clk_prog_2(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 addr = 0x137000 + (idx * 0x20);
	if (idx <= 7) {
		nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
		if (info->coef) {
			nvkm_wr32(device, addr + 0x04, info->coef);
			nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);

			/* test PLL lock */
			nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
			nvkm_msec(device, 2000,
				if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
					break;
			);
			nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);

			/* enable sync mode */
			nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
		}
	}
}

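/* Stage 3: switch the domain back onto the PLL path, if that is the
 * method calc_clk selected, and wait for the switch to complete.
 */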
static void
gf100_clk_prog_3(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (info->ssel) {
		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
		nvkm_msec(device, 2000,
			u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
			if (tmp == info->ssel)
				break;
		);
	}
}

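/* Stage 4: program the final divider for the output frequency. */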
static void
gf100_clk_prog_4(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
}

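/* Run each stage across all pending domains before moving on to the
 * next stage, so related clocks are switched together.
 */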
static int
gf100_clk_prog(struct nvkm_clk *base)
{
	struct gf100_clk *clk = gf100_clk(base);
	struct {
		void (*exec)(struct gf100_clk *, int);
	} stage[] = {
		{ gf100_clk_prog_0 }, /* div programming */
		{ gf100_clk_prog_1 }, /* select div mode */
		{ gf100_clk_prog_2 }, /* (maybe) program pll */
		{ gf100_clk_prog_3 }, /* (maybe) select pll mode */
		{ gf100_clk_prog_4 }, /* final divider */
	};
	int i, j;

	for (i = 0; i < ARRAY_SIZE(stage); i++) {
		for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
			if (!clk->eng[j].freq)
				continue;
			stage[i].exec(clk, j);
		}
	}

	return 0;
}

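/* Discard any pending per-domain state once a programming sequence is
 * finished or abandoned.
 */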
static void
gf100_clk_tidy(struct nvkm_clk *base)
{
	struct gf100_clk *clk = gf100_clk(base);
	memset(clk->eng, 0x00, sizeof(clk->eng));
}

static const struct nvkm_clk_func
gf100_clk = {
	.read = gf100_clk_read,
	.calc = gf100_clk_calc,
	.prog = gf100_clk_prog,
	.tidy = gf100_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_hubk06 , 0x00 },
		{ nv_clk_src_hubk01 , 0x01 },
		{ nv_clk_src_copy   , 0x02 },
		{ nv_clk_src_gpc    , 0x03, NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
		{ nv_clk_src_rop    , 0x04 },
		{ nv_clk_src_mem    , 0x05, 0, "memory", 1000 },
		{ nv_clk_src_vdec   , 0x06 },
		{ nv_clk_src_pmu    , 0x0a },
		{ nv_clk_src_hubk07 , 0x0b },
		{ nv_clk_src_max }
	}
};

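/* Constructor: allocate the subdev and hook up the function table. */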
int
gf100_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_clk **pclk)
{
	struct gf100_clk *clk;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	*pclk = &clk->base;

	return nvkm_clk_ctor(&gf100_clk, device, type, inst, false, &clk->base);
}