#include "priv.h"

#include <subdev/bios.h>
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
#include <subdev/bios/vpstate.h>
#include <subdev/fb.h>
#include <subdev/therm.h>
#include <subdev/volt.h>

#include <core/option.h>

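/* Clamp "input" to the VBIOS boost table entry for the given pstate (when
 * one exists) and, for the matching per-domain sub-entry, optionally scale
 * it by the sub-entry's percentage before clamping to the sub-entry range.
 */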
static u32
nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
		u8 pstate, u8 domain, u32 input)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvbios_boostE boostE;
	u8  ver, hdr, cnt, len;
	u32 data;

	data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
	if (data) {
		struct nvbios_boostS boostS;
		u8  idx = 0, sver, shdr;
		u32 subd;

		input = max(boostE.min, input);
		input = min(boostE.max, input);
		do {
			sver = ver;
			shdr = hdr;
			subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
					      cnt, len, &boostS);
			if (subd && boostS.domain == domain) {
				if (adjust)
					input = input * boostS.percent / 100;
				input = max(boostS.min, input);
				input = min(boostS.max, input);
				break;
			}
		} while (subd);
	}

	return input;
}

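/* C-state handling.
 *
 * Check whether a cstate may be used under the current constraints: its
 * clocks must respect the base/boost limits implied by clk->boost_mode,
 * and its voltage, mapped at the given temperature, must not exceed the
 * supplied maximum or the regulator's own limit.
 */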
static bool
nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
		  u32 max_volt, int temp)
{
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_volt *volt = clk->subdev.device->volt;
	int voltage;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) {
			u32 freq = cstate->domain[domain->name];
			switch (clk->boost_mode) {
			case NVKM_CLK_BOOST_NONE:
				if (clk->base_khz && freq > clk->base_khz)
					return false;
				fallthrough;
			case NVKM_CLK_BOOST_BIOS:
				if (clk->boost_khz && freq > clk->boost_khz)
					return false;
			}
		}
		domain++;
	}

	if (!volt)
		return true;

	voltage = nvkm_volt_map(volt, cstate->voltage, temp);
	if (voltage < 0)
		return false;
	return voltage <= min(max_volt, volt->max_uv);
}

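/* Starting from the requested cstate, walk backwards towards lower cstates
 * and return the first one that is valid for the current voltage and
 * temperature limits, or NULL if none qualifies.
 */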
static struct nvkm_cstate *
nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
		      struct nvkm_cstate *cstate)
{
	struct nvkm_device *device = clk->subdev.device;
	struct nvkm_volt *volt = device->volt;
	int max_volt;

	if (!pstate || !cstate)
		return NULL;

	if (!volt)
		return cstate;

	max_volt = volt->max_uv;
	if (volt->max0_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max0_id, clk->temp));
	if (volt->max1_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max1_id, clk->temp));
	if (volt->max2_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max2_id, clk->temp));

	list_for_each_entry_from_reverse(cstate, &pstate->list, head) {
		if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
			return cstate;
	}

	return NULL;
}

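/* Look up a cstate by id, or return the highest cstate of the pstate when
 * NVKM_CLK_CSTATE_HIGHEST is requested.  Returns NULL if the id is unknown.
 */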
static struct nvkm_cstate *
nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_cstate *cstate;
	if (cstatei == NVKM_CLK_CSTATE_HIGHEST)
		return list_last_entry(&pstate->list, typeof(*cstate), head);
	else {
		list_for_each_entry(cstate, &pstate->list, head) {
			if (cstate->id == cstatei)
				return cstate;
		}
	}
	return NULL;
}

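/* Program the best valid cstate of a pstate: raise fan speed and voltage
 * before reclocking where required, run the implementation's calc/prog/tidy
 * hooks, then relax voltage and fan speed again afterwards.
 */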
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_therm *therm = device->therm;
	struct nvkm_volt *volt = device->volt;
	struct nvkm_cstate *cstate;
	int ret;

	if (!list_empty(&pstate->list)) {
		cstate = nvkm_cstate_get(clk, pstate, cstatei);
		cstate = nvkm_cstate_find_best(clk, pstate, cstate);
		if (!cstate)
			return -EINVAL;
	} else {
		cstate = &pstate->base;
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
			return ret;
		}
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, clk->temp, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
			return ret;
		}
	}

	ret = clk->func->calc(clk, cstate);
	if (ret == 0) {
		ret = clk->func->prog(clk);
		clk->func->tidy(clk);
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, clk->temp, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
	}

	return ret;
}

static void
nvkm_cstate_del(struct nvkm_cstate *cstate)
{
	list_del(&cstate->head);
	kfree(cstate);
}

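/* Parse cstep entry "idx" from the VBIOS and turn it into a new cstate on
 * the pstate's list.  Entries whose minimum voltage cannot be supplied are
 * skipped, and core-clock domains are adjusted against the boost table.
 */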
static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvkm_volt *volt = clk->subdev.device->volt;
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_cstate *cstate = NULL;
	struct nvbios_cstepX cstepX;
	u8  ver, hdr;
	u32 data;

	data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
	if (!data)
		return -ENOENT;

	if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv)
		return -EINVAL;

	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (!cstate)
		return -ENOMEM;

	*cstate = pstate->base;
	cstate->voltage = cstepX.voltage;
	cstate->id = idx;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			u32 freq = nvkm_clk_adjust(clk, true, pstate->pstate,
						   domain->bios, cstepX.freq);
			cstate->domain[domain->name] = freq;
		}
		domain++;
	}

	list_add(&cstate->head, &pstate->list);
	return 0;
}

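/* P-state (performance level) handling.
 *
 * Program the pstatei'th pstate: update the PCIe link, reclock memory
 * (retrying while the RAM implementation requests another pass), then
 * program the highest cstate that is valid under the current limits.
 */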
static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_fb *fb = subdev->device->fb;
	struct nvkm_pci *pci = subdev->device->pci;
	struct nvkm_pstate *pstate;
	int ret, idx = 0;

	list_for_each_entry(pstate, &clk->states, head) {
		if (idx++ == pstatei)
			break;
	}

	nvkm_debug(subdev, "setting performance state %d\n", pstatei);
	clk->pstate = pstatei;

	nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);

	if (fb && fb->ram && fb->ram->func->calc) {
		struct nvkm_ram *ram = fb->ram;
		int khz = pstate->base.domain[nv_clk_src_mem];
		do {
			ret = ram->func->calc(ram, khz);
			if (ret == 0)
				ret = ram->func->prog(ram);
		} while (ret > 0);
		ram->func->tidy(ram);
	}

	return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST);
}

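/* Deferred work that selects the effective pstate from the power source,
 * the user (AC/DC) requests, astate and dstate, clamps it to the valid
 * range, and programs it if it differs from the current pstate.
 */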
static void
nvkm_pstate_work(struct work_struct *work)
{
	struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
	struct nvkm_subdev *subdev = &clk->subdev;
	int pstate;

	if (!atomic_xchg(&clk->waiting, 0))
		return;
	clk->pwrsrc = power_supply_is_system_supplied();

	nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n",
		   clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
		   clk->astate, clk->temp, clk->dstate);

	pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
	if (clk->state_nr && pstate != -1) {
		pstate = (pstate < 0) ? clk->astate : pstate;
		pstate = min(pstate, clk->state_nr - 1);
		pstate = max(pstate, clk->dstate);
	} else {
		pstate = clk->pstate = -1;
	}

	nvkm_trace(subdev, "-> %d\n", pstate);
	if (pstate != clk->pstate) {
		int ret = nvkm_pstate_prog(clk, pstate);
		if (ret) {
			nvkm_error(subdev, "error setting pstate %d: %d\n",
				   pstate, ret);
		}
	}

	wake_up_all(&clk->wait);
}

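/* Schedule a pstate re-evaluation; optionally wait for the worker to run. */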
static int
nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
{
	atomic_set(&clk->waiting, 1);
	schedule_work(&clk->work);
	if (wait)
		wait_event(clk->wait, !atomic_read(&clk->waiting));
	return 0;
}

static void
nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
{
	const struct nvkm_domain *clock = clk->domains - 1;
	struct nvkm_cstate *cstate;
	struct nvkm_subdev *subdev = &clk->subdev;
	char info[3][32] = { "", "", "" };
	char name[4] = "--";
	int i = -1;

	if (pstate->pstate != 0xff)
		snprintf(name, sizeof(name), "%02x", pstate->pstate);

	while ((++clock)->name != nv_clk_src_max) {
		u32 lo = pstate->base.domain[clock->name];
		u32 hi = lo;
		if (hi == 0)
			continue;

		nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
		list_for_each_entry(cstate, &pstate->list, head) {
			u32 freq = cstate->domain[clock->name];
			lo = min(lo, freq);
			hi = max(hi, freq);
			nvkm_debug(subdev, "%10d KHz\n", freq);
		}

		if (clock->mname && ++i < ARRAY_SIZE(info)) {
			lo /= clock->mdiv;
			hi /= clock->mdiv;
			if (lo == hi) {
				snprintf(info[i], sizeof(info[i]), "%s %d MHz",
					 clock->mname, lo);
			} else {
				snprintf(info[i], sizeof(info[i]),
					 "%s %d-%d MHz", clock->mname, lo, hi);
			}
		}
	}

	nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
}

static void
nvkm_pstate_del(struct nvkm_pstate *pstate)
{
	struct nvkm_cstate *cstate, *temp;

	list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
		nvkm_cstate_del(cstate);
	}

	list_del(&pstate->head);
	kfree(pstate);
}

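/* Parse perf table entry "idx" from the VBIOS into a new pstate: fill in
 * the base cstate from the entry, apply boost-table adjustments per clock
 * domain, then parse any cstep sub-entries into additional cstates.
 */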
static int
nvkm_pstate_new(struct nvkm_clk *clk, int idx)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	const struct nvkm_domain *domain = clk->domains - 1;
	struct nvkm_pstate *pstate;
	struct nvkm_cstate *cstate;
	struct nvbios_cstepE cstepE;
	struct nvbios_perfE perfE;
	u8  ver, hdr, cnt, len;
	u32 data;

	data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
	if (!data)
		return -EINVAL;
	if (perfE.pstate == 0xff)
		return 0;

	pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
	if (!pstate)
		return -ENOMEM;
	cstate = &pstate->base;

	INIT_LIST_HEAD(&pstate->list);

	pstate->pstate = perfE.pstate;
	pstate->fanspeed = perfE.fanspeed;
	pstate->pcie_speed = perfE.pcie_speed;
	pstate->pcie_width = perfE.pcie_width;
	cstate->voltage = perfE.voltage;
	cstate->domain[nv_clk_src_core] = perfE.core;
	cstate->domain[nv_clk_src_shader] = perfE.shader;
	cstate->domain[nv_clk_src_mem] = perfE.memory;
	cstate->domain[nv_clk_src_vdec] = perfE.vdec;
	cstate->domain[nv_clk_src_dom6] = perfE.disp;

	while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
		struct nvbios_perfS perfS;
		u8  sver = ver, shdr = hdr;
		u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
					   &sver, &shdr, cnt, len, &perfS);
		if (perfSe == 0 || sver != 0x40)
			continue;

		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			perfS.v40.freq = nvkm_clk_adjust(clk, false,
							 pstate->pstate,
							 domain->bios,
							 perfS.v40.freq);
		}

		cstate->domain[domain->name] = perfS.v40.freq;
	}

	data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
	if (data) {
		int idx = cstepE.index;
		do {
			nvkm_cstate_new(clk, idx, pstate);
		} while (idx--);
	}

	nvkm_pstate_info(clk, pstate);
	list_add_tail(&pstate->head, &clk->states);
	clk->state_nr++;
	return 0;
}

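/* Adjustment triggers: user, adaptive, thermal and power-source requests.
 *
 * Translate a user pstate request into a list index; -1 and -2 (the
 * "disabled" and "auto" encodings used by nvkm_clk_nstate) pass through.
 * The result is returned biased by +2 so that it never collides with an
 * error code.
 */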
static int
nvkm_clk_ustate_update(struct nvkm_clk *clk, int req)
{
	struct nvkm_pstate *pstate;
	int i = 0;

	if (!clk->allow_reclock)
		return -ENOSYS;

	if (req != -1 && req != -2) {
		list_for_each_entry(pstate, &clk->states, head) {
			if (pstate->pstate == req)
				break;
			i++;
		}

		if (pstate->pstate != req)
			return -EINVAL;
		req = i;
	}

	return req + 2;
}

static int
nvkm_clk_nstate(struct nvkm_clk *clk, const char *mode, int arglen)
{
	int ret = 1;

	if (clk->allow_reclock && !strncasecmpz(mode, "auto", arglen))
		return -2;

	if (strncasecmpz(mode, "disabled", arglen)) {
		char save = mode[arglen];
		long v;

		((char *)mode)[arglen] = '\0';
		if (!kstrtol(mode, 0, &v)) {
			ret = nvkm_clk_ustate_update(clk, v);
			if (ret < 0)
				ret = 1;
		}
		((char *)mode)[arglen] = save;
	}

	return ret - 2;
}

int
nvkm_clk_ustate(struct nvkm_clk *clk, int req, int pwr)
{
	int ret = nvkm_clk_ustate_update(clk, req);
	if (ret >= 0) {
		if (ret -= 2, pwr)
			clk->ustate_ac = ret;
		else
			clk->ustate_dc = ret;
		return nvkm_pstate_calc(clk, true);
	}
	return ret;
}

int
nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
{
	if (!rel) clk->astate  = req;
	if ( rel) clk->astate += rel;
	clk->astate = min(clk->astate, clk->state_nr - 1);
	clk->astate = max(clk->astate, 0);
	return nvkm_pstate_calc(clk, wait);
}

int
nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp)
{
	if (clk->temp == temp)
		return 0;
	clk->temp = temp;
	return nvkm_pstate_calc(clk, false);
}

int
nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
{
	if (!rel) clk->dstate  = req;
	if ( rel) clk->dstate += rel;
	clk->dstate = min(clk->dstate, clk->state_nr - 1);
	clk->dstate = max(clk->dstate, 0);
	return nvkm_pstate_calc(clk, true);
}

int
nvkm_clk_pwrsrc(struct nvkm_device *device)
{
	if (device->clk)
		return nvkm_pstate_calc(device->clk, false);
	return 0;
}

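/* Subdev interface: clock readback plus the init/fini/dtor hooks wired
 * into the nvkm_subdev_func table below.
 */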
int
nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
	return clk->func->read(clk, src);
}

static int
nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	flush_work(&clk->work);
	if (clk->func->fini)
		clk->func->fini(clk);
	return 0;
}

static int
nvkm_clk_init(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	const struct nvkm_domain *clock = clk->domains;
	int ret;

	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
	INIT_LIST_HEAD(&clk->bstate.list);
	clk->bstate.pstate = 0xff;

	while (clock->name != nv_clk_src_max) {
		ret = nvkm_clk_read(clk, clock->name);
		if (ret < 0) {
			nvkm_error(subdev, "%02x freq unknown\n", clock->name);
			return ret;
		}
		clk->bstate.base.domain[clock->name] = ret;
		clock++;
	}

	nvkm_pstate_info(clk, &clk->bstate);

	if (clk->func->init)
		return clk->func->init(clk);

	clk->astate = clk->state_nr - 1;
	clk->dstate = 0;
	clk->pstate = -1;
	clk->temp = 90; /* reasonable default value */
	nvkm_pstate_calc(clk, true);
	return 0;
}

static void *
nvkm_clk_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	struct nvkm_pstate *pstate, *temp;

	/* pstates provided statically by the implementation are not freed here */
	if (clk->func->pstates)
		return clk;

	list_for_each_entry_safe(pstate, temp, &clk->states, head) {
		nvkm_pstate_del(pstate);
	}

	return clk;
}

static const struct nvkm_subdev_func
nvkm_clk = {
	.dtor = nvkm_clk_dtor,
	.init = nvkm_clk_init,
	.fini = nvkm_clk_fini,
};

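/* Constructor: read the base/boost clocks from the VBIOS vpstate table,
 * build the pstate list (from the BIOS perf table unless the implementation
 * provides pstates statically), and honour the NvClkMode, NvClkModeAC,
 * NvClkModeDC and NvBoost configuration options.
 */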
int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_bios *bios = device->bios;
	int ret, idx, arglen;
	const char *mode;
	struct nvbios_vpstate_header h;

	nvkm_subdev_ctor(&nvkm_clk, device, type, inst, subdev);

	if (bios && !nvbios_vpstate_parse(bios, &h)) {
		struct nvbios_vpstate_entry base, boost;
		if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
			clk->boost_khz = boost.clock_mhz * 1000;
		if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
			clk->base_khz = base.clock_mhz * 1000;
	}

	clk->func = func;
	INIT_LIST_HEAD(&clk->states);
	clk->domains = func->domains;
	clk->ustate_ac = -1;
	clk->ustate_dc = -1;
	clk->allow_reclock = allow_reclock;

	INIT_WORK(&clk->work, nvkm_pstate_work);
	init_waitqueue_head(&clk->wait);
	atomic_set(&clk->waiting, 0);

	/* If no pstates are provided statically, fetch them from the BIOS */
	if (!func->pstates) {
		idx = 0;
		do {
			ret = nvkm_pstate_new(clk, idx++);
		} while (ret == 0);
	} else {
		for (idx = 0; idx < func->nr_pstates; idx++)
			list_add_tail(&func->pstates[idx].head, &clk->states);
		clk->state_nr = func->nr_pstates;
	}

	mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen);
	if (mode) {
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
	}

	mode = nvkm_stropt(device->cfgopt, "NvClkModeAC", &arglen);
	if (mode)
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);

	mode = nvkm_stropt(device->cfgopt, "NvClkModeDC", &arglen);
	if (mode)
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);

	clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost",
				       NVKM_CLK_BOOST_NONE);
	return 0;
}

int
nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk **pclk)
{
	if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_clk_ctor(func, device, type, inst, allow_reclock, *pclk);
}