0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #define mcp77_clk(p) container_of((p), struct mcp77_clk, base)
0025 #include "gt215.h"
0026 #include "pll.h"
0027
0028 #include <subdev/bios.h>
0029 #include <subdev/bios/pll.h>
0030 #include <subdev/timer.h>
0031
/* Per-device clock state; csrc/ssrc/vsrc plus the register values below
 * are filled in by mcp77_clk_calc() and written out by mcp77_clk_prog().
 */
struct mcp77_clk {
	struct nvkm_clk base;
	enum nv_clk_src csrc, ssrc, vsrc;	/* chosen source: core/shader/vdec */
	u32 cctrl, sctrl;	/* PLL control words (post-divider in bits 16+) */
	u32 ccoef, scoef;	/* PLL coefficients, (N << 8) | M */
	u32 cpost, spost;	/* secondary post-divider registers (0x4040/0x4070) */
	u32 vdiv;		/* vdec clock divider (0x4600) */
};
0040
0041 static u32
0042 read_div(struct mcp77_clk *clk)
0043 {
0044 struct nvkm_device *device = clk->base.subdev.device;
0045 return nvkm_rd32(device, 0x004600);
0046 }
0047
0048 static u32
0049 read_pll(struct mcp77_clk *clk, u32 base)
0050 {
0051 struct nvkm_device *device = clk->base.subdev.device;
0052 u32 ctrl = nvkm_rd32(device, base + 0);
0053 u32 coef = nvkm_rd32(device, base + 4);
0054 u32 ref = nvkm_clk_read(&clk->base, nv_clk_src_href);
0055 u32 post_div = 0;
0056 u32 clock = 0;
0057 int N1, M1;
0058
0059 switch (base){
0060 case 0x4020:
0061 post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
0062 break;
0063 case 0x4028:
0064 post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
0065 break;
0066 default:
0067 break;
0068 }
0069
0070 N1 = (coef & 0x0000ff00) >> 8;
0071 M1 = (coef & 0x000000ff);
0072 if ((ctrl & 0x80000000) && M1) {
0073 clock = ref * N1 / M1;
0074 clock = clock / post_div;
0075 }
0076
0077 return clock;
0078 }
0079
/* Report the current frequency (kHz) of clock source @src by decoding
 * the clock-source-mux register (0x00c054) and the PLL/divider state.
 * Unknown or unconfigured sources return 0.
 */
static int
mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mast = nvkm_rd32(device, 0x00c054);	/* clock source mux */
	u32 P = 0;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000; /* reference clock, 100 MHz in kHz */
	case nv_clk_src_hclkm4:
		/* href multiplied by 4 */
		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 4;
	case nv_clk_src_hclkm2d3:
		/* href * 2 / 3 */
		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 2 / 3;
	case nv_clk_src_host:
		switch (mast & 0x000c0000) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
		case 0x00040000: break; /* unknown setting — fall out, report 0 */
		case 0x00080000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
		case 0x000c0000: return nvkm_clk_read(&clk->base, nv_clk_src_cclk);
		}
		break;
	case nv_clk_src_core:
		/* NVPLL post-divider */
		P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;

		switch (mast & 0x00000003) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000001: return 0;
		case 0x00000002: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4) >> P;
		case 0x00000003: return read_pll(clk, 0x004028) >> P;
		}
		break;
	case nv_clk_src_cclk:
		/* cclk only differs from core when the alternate path is
		 * fully enabled in the mux; otherwise it mirrors core.
		 */
		if ((mast & 0x03000000) != 0x03000000)
			return nvkm_clk_read(&clk->base, nv_clk_src_core);

		if ((mast & 0x00000200) == 0x00000000)
			return nvkm_clk_read(&clk->base, nv_clk_src_core);

		switch (mast & 0x00000c00) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
		case 0x00000400: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
		case 0x00000800: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
		default: return 0;
		}
	case nv_clk_src_shader:
		/* SPLL post-divider */
		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
		switch (mast & 0x00000030) {
		case 0x00000000:
			if (mast & 0x00000040)
				return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
			return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000010: break; /* unknown setting — report 0 */
		case 0x00000020: return read_pll(clk, 0x004028) >> P;
		case 0x00000030: return read_pll(clk, 0x004020) >> P;
		}
		break;
	case nv_clk_src_mem:
		/* no memory reclocking support here */
		return 0;
	case nv_clk_src_vdec:
		P = (read_div(clk) & 0x00000700) >> 8;

		switch (mast & 0x00400000) {
		case 0x00400000:
			return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
		default:
			/* NOTE(review): fixed 500 MHz source assumed here */
			return 500000 >> P;
		}
		break;
	default:
		break;
	}

	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
	return 0;
}
0160
0161 static u32
0162 calc_pll(struct mcp77_clk *clk, u32 reg,
0163 u32 clock, int *N, int *M, int *P)
0164 {
0165 struct nvkm_subdev *subdev = &clk->base.subdev;
0166 struct nvbios_pll pll;
0167 int ret;
0168
0169 ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
0170 if (ret)
0171 return 0;
0172
0173 pll.vco2.max_freq = 0;
0174 pll.refclk = nvkm_clk_read(&clk->base, nv_clk_src_href);
0175 if (!pll.refclk)
0176 return 0;
0177
0178 return nv04_pll_calc(subdev, &pll, clock, N, M, NULL, NULL, P);
0179 }
0180
/* Find the power-of-two divider (0..7) bringing @src closest to @target.
 * Stores the chosen divider exponent in *div and returns the resulting
 * frequency.
 */
static inline u32
calc_P(u32 src, u32 target, int *div)
{
	u32 below = src;	/* first candidate at or below target */
	u32 above = src;	/* candidate one divider step faster */

	for (*div = 0; *div <= 7; (*div)++) {
		if (below <= target) {
			above = *div ? below << 1 : below;
			break;
		}
		below >>= 1;
	}

	/* Return whichever candidate lands nearer the target. */
	if (target - below <= above - target)
		return below;
	(*div)--;
	return above;
}
0198
0199 static int
0200 mcp77_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
0201 {
0202 struct mcp77_clk *clk = mcp77_clk(base);
0203 const int shader = cstate->domain[nv_clk_src_shader];
0204 const int core = cstate->domain[nv_clk_src_core];
0205 const int vdec = cstate->domain[nv_clk_src_vdec];
0206 struct nvkm_subdev *subdev = &clk->base.subdev;
0207 u32 out = 0, clock = 0;
0208 int N, M, P1, P2 = 0;
0209 int divs = 0;
0210
0211
0212 if (core < nvkm_clk_read(&clk->base, nv_clk_src_hclkm4))
0213 out = calc_P(nvkm_clk_read(&clk->base, nv_clk_src_hclkm4), core, &divs);
0214
0215
0216 clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);
0217
0218 if (abs(core - out) <= abs(core - (clock >> 1))) {
0219 clk->csrc = nv_clk_src_hclkm4;
0220 clk->cctrl = divs << 16;
0221 } else {
0222
0223
0224
0225 if(P1 > 2) {
0226 P2 = P1 - 2;
0227 P1 = 2;
0228 }
0229
0230 clk->csrc = nv_clk_src_core;
0231 clk->ccoef = (N << 8) | M;
0232
0233 clk->cctrl = (P2 + 1) << 16;
0234 clk->cpost = (1 << P1) << 16;
0235 }
0236
0237
0238 out = 0;
0239 if (shader == nvkm_clk_read(&clk->base, nv_clk_src_href)) {
0240 clk->ssrc = nv_clk_src_href;
0241 } else {
0242 clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
0243 if (clk->csrc == nv_clk_src_core)
0244 out = calc_P((core << 1), shader, &divs);
0245
0246 if (abs(shader - out) <=
0247 abs(shader - clock) &&
0248 (divs + P2) <= 7) {
0249 clk->ssrc = nv_clk_src_core;
0250 clk->sctrl = (divs + P2) << 16;
0251 } else {
0252 clk->ssrc = nv_clk_src_shader;
0253 clk->scoef = (N << 8) | M;
0254 clk->sctrl = P1 << 16;
0255 }
0256 }
0257
0258
0259 out = calc_P(core, vdec, &divs);
0260 clock = calc_P(500000, vdec, &P1);
0261 if(abs(vdec - out) <= abs(vdec - clock)) {
0262 clk->vsrc = nv_clk_src_cclk;
0263 clk->vdiv = divs << 16;
0264 } else {
0265 clk->vsrc = nv_clk_src_vdec;
0266 clk->vdiv = P1 << 16;
0267 }
0268
0269
0270 nvkm_debug(subdev, "nvpll: %08x %08x %08x\n",
0271 clk->ccoef, clk->cpost, clk->cctrl);
0272 nvkm_debug(subdev, " spll: %08x %08x %08x\n",
0273 clk->scoef, clk->spost, clk->sctrl);
0274 nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv);
0275 if (clk->csrc == nv_clk_src_hclkm4)
0276 nvkm_debug(subdev, "core: hrefm4\n");
0277 else
0278 nvkm_debug(subdev, "core: nvpll\n");
0279
0280 if (clk->ssrc == nv_clk_src_hclkm4)
0281 nvkm_debug(subdev, "shader: hrefm4\n");
0282 else if (clk->ssrc == nv_clk_src_core)
0283 nvkm_debug(subdev, "shader: nvpll\n");
0284 else
0285 nvkm_debug(subdev, "shader: spll\n");
0286
0287 if (clk->vsrc == nv_clk_src_hclkm4)
0288 nvkm_debug(subdev, "vdec: 500MHz\n");
0289 else
0290 nvkm_debug(subdev, "vdec: core\n");
0291
0292 return 0;
0293 }
0294
/* Program the clock tree chosen by mcp77_clk_calc(): switch to safe
 * sources, write the PLL/divider registers, wait for PLL lock, then
 * commit the new source mux (0x00c054).
 */
static int
mcp77_clk_prog(struct nvkm_clk *base)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 pllmask = 0, mast;
	unsigned long flags;
	unsigned long *f = &flags;
	int ret = 0;

	ret = gt215_clk_pre(&clk->base, f);
	if (ret)
		goto out;

	/* First switch to safe clocks (href) while reprogramming */
	mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
	mast &= ~0x00400e73;
	mast |= 0x03000000;

	switch (clk->csrc) {
	case nv_clk_src_hclkm4:
		nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
		mast |= 0x00000002;
		break;
	case nv_clk_src_core:
		nvkm_wr32(device, 0x402c, clk->ccoef);
		nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
		nvkm_wr32(device, 0x4040, clk->cpost);
		pllmask |= (0x3 << 8);	/* NVPLL lock bits, waited on below */
		mast |= 0x00000003;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown core clock\n");
		goto resume;
	}

	switch (clk->ssrc) {
	case nv_clk_src_href:
		nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
		/* mast |= 0x00000000; — href is the mux's zero setting */
		break;
	case nv_clk_src_core:
		nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
		mast |= 0x00000020;
		break;
	case nv_clk_src_shader:
		nvkm_wr32(device, 0x4024, clk->scoef);
		nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
		nvkm_wr32(device, 0x4070, clk->spost);
		pllmask |= (0x3 << 12);	/* SPLL lock bits, waited on below */
		mast |= 0x00000030;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n");
		goto resume;
	}

	/* Wait up to 2ms for the reprogrammed PLLs to report lock */
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
		if (tmp == pllmask)
			break;
	) < 0)
		goto resume;

	switch (clk->vsrc) {
	case nv_clk_src_cclk:
		mast |= 0x00400000;
		fallthrough;
	default:
		nvkm_wr32(device, 0x4600, clk->vdiv);
	}

	/* Commit the new clock source selection */
	nvkm_wr32(device, 0xc054, mast);

resume:
	/* Disable PLLs and secondary dividers that ended up unused */
	if (clk->csrc != nv_clk_src_core) {
		nvkm_wr32(device, 0x4040, 0x00000000);
		nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
	}

	if (clk->ssrc != nv_clk_src_shader) {
		nvkm_wr32(device, 0x4070, 0x00000000);
		nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
	}

out:
	/* NOTE(review): on -EBUSY, gt215_clk_pre() presumably did not save
	 * IRQ flags, so pass NULL to gt215_clk_post() — confirm in gt215.c.
	 */
	if (ret == -EBUSY)
		f = NULL;

	gt215_clk_post(&clk->base, f);
	return ret;
}
0389
/* No post-reclock cleanup is required; stub to satisfy nvkm_clk_func. */
static void
mcp77_clk_tidy(struct nvkm_clk *base)
{
}
0394
/* Clock subdev implementation and reclockable domains for the MCP77
 * family (mask 0xff = valid in all perf levels; 1000 = kHz step).
 */
static const struct nvkm_clk_func
mcp77_clk = {
	.read = mcp77_clk_read,
	.calc = mcp77_clk_calc,
	.prog = mcp77_clk_prog,
	.tidy = mcp77_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
		{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
		{ nv_clk_src_max }
	}
};
0410
0411 int
0412 mcp77_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
0413 struct nvkm_clk **pclk)
0414 {
0415 struct mcp77_clk *clk;
0416
0417 if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
0418 return -ENOMEM;
0419 *pclk = &clk->base;
0420
0421 return nvkm_clk_ctor(&mcp77_clk, device, type, inst, true, &clk->base);
0422 }