0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034 #define pr_fmt(fmt) "ACPI CPPC: " fmt
0035
0036 #include <linux/delay.h>
0037 #include <linux/iopoll.h>
0038 #include <linux/ktime.h>
0039 #include <linux/rwsem.h>
0040 #include <linux/wait.h>
0041 #include <linux/topology.h>
0042
0043 #include <acpi/cppc_acpi.h>
0044
/* Per-subspace bookkeeping for a PCC (Platform Communication Channel). */
struct cppc_pcc_data {
	struct pcc_mbox_chan *pcc_channel;	/* mailbox channel for this subspace */
	void __iomem *pcc_comm_addr;		/* mapped shared-memory comm region */
	bool pcc_channel_acquired;		/* channel requested and region mapped */
	unsigned int deadline_us;		/* max usecs to poll for cmd completion */
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;	/* rate/turnaround/latency params */

	bool pending_pcc_write_cmd;	/* any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* true while the platform owns the channel */
	unsigned int pcc_write_cnt;	/* running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance critical usecases(currently cppc_set_perf)
	 *	We need to take read_lock and check if channel belongs to OSPM
	 * before reading or writing to PCC subspace
	 *	We need to take write_lock before transferring the channel
	 * ownership to the platform via a Doorbell
	 *	This allows us to batch a number of CPPC requests if they happen
	 * to originate in about the same time
	 *
	 * For non-performance critical usecases(init)
	 *	Take write_lock for all purposes which gives exclusive access
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;	/* when the last command completed */
	ktime_t last_mpar_reset;	/* start of the current MPAR rate window */
	int mpar_count;			/* commands left in the current MPAR window */
	int refcount;			/* CPUs sharing this subspace */
};
0079
0080
/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];

/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which lets us READ/WRITE CPU performance
 * values over the appropriate address space.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
						0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Check if a CPC register is in SystemMemory */
#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_SYSTEM_MEMORY)

/* Check if a CPC register is in SystemIo */
#define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_SYSTEM_IO)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id ==  ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))

/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

#define OVER_16BTS_MASK ~0xFFFFULL
0132
/* Declare a read-only sysfs attribute backed by show_<_name>(). */
#define define_one_cppc_ro(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

/*
 * Generate a sysfs show handler that fetches a whole struct via
 * @access_fn and prints one @member_name field from it.
 */
#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,		\
				struct kobj_attribute *attr, char *buf)	\
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)

/* Performance-capability attributes (one sysfs file each). */
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

/* Feedback-counter attributes. */
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
0165
0166 static ssize_t show_feedback_ctrs(struct kobject *kobj,
0167 struct kobj_attribute *attr, char *buf)
0168 {
0169 struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
0170 struct cppc_perf_fb_ctrs fb_ctrs = {0};
0171 int ret;
0172
0173 ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
0174 if (ret)
0175 return ret;
0176
0177 return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
0178 fb_ctrs.reference, fb_ctrs.delivered);
0179 }
0180 define_one_cppc_ro(feedback_ctrs);
0181
/* All per-CPU acpi_cppc sysfs attributes, exposed via cppc_ktype below. */
static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL	/* sentinel */
};
ATTRIBUTE_GROUPS(cppc);

static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = cppc_groups,
};
0200
/*
 * Poll the PCC shared-memory status word until the platform signals
 * command completion, then (optionally) check the error bit.
 * Returns 0 on success, -ETIMEDOUT/-EIO on failure.  On success the
 * channel ownership transfers back to OSPM.
 */
static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	/* Nothing to wait for if OSPM already owns the channel. */
	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll PCC status register every 3us(delay_us) for maximum of
	 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					status & PCC_CMD_COMPLETE_MASK, 3,
					pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}
0231
0232
0233
0234
0235
/*
 * This function transfers the ownership of the PCC to the platform
 * So it must be called while holding write_lock(pcc_lock)
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time(MRTT)
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should be ideally zero or large enough so that it can
	 * handle maximum number of requests that all the cores in the system can
	 * collectively generate. If it is not, we will follow the spec and just
	 * not send the request to the platform after hitting the MPAR limit in
	 * any 60s window
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			/* Propagate the failure to every CPU whose write was batched. */
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}
0348
0349 static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
0350 {
0351 if (ret < 0)
0352 pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
0353 *(u16 *)msg, ret);
0354 else
0355 pr_debug("TX completed. CMD sent:%x, ret:%d\n",
0356 *(u16 *)msg, ret);
0357 }
0358
/* Mailbox client used for all CPPC PCC subspaces; tx-done is driver-managed. */
static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};
0363
/*
 * Evaluate the optional _PSD object for this CPU and cache the dependency
 * domain info in cpc_ptr->domain_info.  A missing _PSD is not an error
 * (returns 0); malformed data returns -EFAULT, evaluation failure -ENODEV.
 */
static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object  *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	/* Unpack the single inner package into the acpi_psd_package struct. */
	status = acpi_extract_package(&(psd->package.elements[0]),
		&format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	/* Only the three coordination types defined by the spec are valid. */
	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}
0421
0422 bool acpi_cpc_valid(void)
0423 {
0424 struct cpc_desc *cpc_ptr;
0425 int cpu;
0426
0427 for_each_present_cpu(cpu) {
0428 cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
0429 if (!cpc_ptr)
0430 return false;
0431 }
0432
0433 return true;
0434 }
0435 EXPORT_SYMBOL_GPL(acpi_cpc_valid);
0436
0437 bool cppc_allow_fast_switch(void)
0438 {
0439 struct cpc_register_resource *desired_reg;
0440 struct cpc_desc *cpc_ptr;
0441 int cpu;
0442
0443 for_each_possible_cpu(cpu) {
0444 cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
0445 desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
0446 if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
0447 !CPC_IN_SYSTEM_IO(desired_reg))
0448 return false;
0449 }
0450
0451 return true;
0452 }
0453 EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);
0454
0455
0456
0457
0458
0459
0460
0461
/**
 * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
 * @cpu: Find all CPUs sharing a domain with cpu.
 * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
	struct acpi_psd_package *match_pdomain;
	struct acpi_psd_package *pdomain;
	int count_target, i;

	/*
	 * Now that we have _PSD data from all CPUs, let's setup P-state
	 * domain info.
	 */
	cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
	if (!cpc_ptr)
		return -EFAULT;

	pdomain = &(cpc_ptr->domain_info);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	/* Single-CPU domain: nothing else to map. */
	if (pdomain->num_processors <= 1)
		return 0;

	/* Validate the Domain info */
	count_target = pdomain->num_processors;
	if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;

	for_each_possible_cpu(i) {
		if (i == cpu)
			continue;

		match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!match_cpc_ptr)
			goto err_fault;

		match_pdomain = &(match_cpc_ptr->domain_info);
		if (match_pdomain->domain != pdomain->domain)
			continue;

		/* All CPUs in one domain must report the same _PSD contents. */
		if (match_pdomain->num_processors != count_target)
			goto err_fault;

		if (pdomain->coord_type != match_pdomain->coord_type)
			goto err_fault;

		cpumask_set_cpu(i, cpu_data->shared_cpu_map);
	}

	return 0;

err_fault:
	/* Assume no coordination on any error parsing domain info */
	cpumask_clear(cpu_data->shared_cpu_map);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;

	return -EFAULT;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
0524
0525 static int register_pcc_channel(int pcc_ss_idx)
0526 {
0527 struct pcc_mbox_chan *pcc_chan;
0528 u64 usecs_lat;
0529
0530 if (pcc_ss_idx >= 0) {
0531 pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
0532
0533 if (IS_ERR(pcc_chan)) {
0534 pr_err("Failed to find PCC channel for subspace %d\n",
0535 pcc_ss_idx);
0536 return -ENODEV;
0537 }
0538
0539 pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
0540
0541
0542
0543
0544
0545 usecs_lat = NUM_RETRIES * pcc_chan->latency;
0546 pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
0547 pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
0548 pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
0549 pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
0550
0551 pcc_data[pcc_ss_idx]->pcc_comm_addr =
0552 acpi_os_ioremap(pcc_chan->shmem_base_addr,
0553 pcc_chan->shmem_size);
0554 if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
0555 pr_err("Failed to ioremap PCC comm region mem for %d\n",
0556 pcc_ss_idx);
0557 return -ENOMEM;
0558 }
0559
0560
0561 pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
0562 }
0563
0564 return 0;
0565 }
0566
0567
0568
0569
0570
0571
0572
0573
0574
/*
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}
0579
0580
0581
0582
0583
0584
0585
0586
0587
/*
 * cpc_supported_by_cpu() - check if CPPC is supported by CPU
 *
 * Check if the architectural support for CPPC is present even
 * if the _OSC hasn't prescribed it
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_supported_by_cpu(void)
{
	return false;
}
0592
0593
0594
0595
0596
0597
0598
0599
0600
0601
0602
0603 static int pcc_data_alloc(int pcc_ss_id)
0604 {
0605 if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
0606 return -EINVAL;
0607
0608 if (pcc_data[pcc_ss_id]) {
0609 pcc_data[pcc_ss_id]->refcount++;
0610 } else {
0611 pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
0612 GFP_KERNEL);
0613 if (!pcc_data[pcc_ss_id])
0614 return -ENOMEM;
0615 pcc_data[pcc_ss_id]->refcount++;
0616 }
0617
0618 return 0;
0619 }
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
/* Fallback no-op when the architecture provides no CPPC invariance hook. */
#ifndef arch_init_invariance_cppc
static inline void arch_init_invariance_cppc(void) { }
#endif
0653
0654
0655
0656
0657
0658
0659
/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Evaluate and parse _CPC, cache the register descriptors, set up the
 * PCC channel if any register lives in PCC space, and create the
 * per-CPU "acpi_cppc" sysfs directory.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -ENODATA;

	if (!osc_sb_cppc2_support_acked) {
		pr_debug("CPPC v2 _OSC not acked\n");
		if (!cpc_supported_by_cpu())
			return -ENODEV;
	}

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
		if (num_ent <= 1) {
			pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
				 num_ent, pr->id);
			goto out_free;
		}
	} else {
		pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
			 cpc_obj->type, pr->id);
		goto out_free;
	}

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
			 cpc_obj->type, pr->id);
		goto out_free;
	}

	if (cpc_rev < CPPC_V2_REV) {
		pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
			 pr->id);
		goto out_free;
	}

	/*
	 * Disregard _CPC if the number of entries in the return pachage is not
	 * as expected, but support future revisions being proper supersets of
	 * the v3 and only causing more entries to be returned by _CPC.
	 */
	if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
	    (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
	    (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
		pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
			 num_ent, pr->id);
		goto out_free;
	}
	if (cpc_rev > CPPC_V3_REV) {
		num_ent = CPPC_V3_NUM_ENT;
		cpc_rev = CPPC_V3_REV;
	}

	cpc_ptr->num_entries = num_ent;
	cpc_ptr->version = cpc_rev;

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
						 pr->id);
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					if (!osc_cpc_flexible_adr_space_confirmed) {
						pr_debug("Flexible address space capability not supported\n");
						if (!cpc_supported_by_cpu())
							goto out_free;
					}

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
				if (gas_t->access_width < 1 || gas_t->access_width > 3) {
					/*
					 * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
					 * SystemIO doesn't implement 64-bit
					 * registers.
					 */
					pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
						 gas_t->access_width);
					goto out_free;
				}
				if (gas_t->address & OVER_16BTS_MASK) {
					/* SystemIO registers use 16-bit integer addresses */
					pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
						 gas_t->address);
					goto out_free;
				}
				if (!osc_cpc_flexible_adr_space_confirmed) {
					pr_debug("Flexible address space capability not supported\n");
					if (!cpc_supported_by_cpu())
						goto out_free;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
					pr_debug("Unsupported register type (%d) in _CPC\n",
						 gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			/* NOTE(review): message says "type" but prints the index i — verify intent. */
			pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n",
				 i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}


	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
			"acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		kobject_put(&cpc_ptr->kobj);
		goto out_free;
	}

	arch_init_invariance_cppc();

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
0898 EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
0899
0900
0901
0902
0903
0904
0905
/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	/* Drop this CPU's reference on the shared PCC subspace data. */
	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
				pcc_data[pcc_ss_id] = NULL;
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
0938 EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
0939
0940
0941
0942
0943
0944
0945
0946
0947
0948
0949
/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}
0954
0955
0956
0957
0958
0959
0960
0961
0962
0963
0964
/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}
0969
0970
0971
0972
0973
0974
0975
0976 static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
0977 {
0978 void __iomem *vaddr = NULL;
0979 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
0980 struct cpc_reg *reg = ®_res->cpc_entry.reg;
0981
0982 if (reg_res->type == ACPI_TYPE_INTEGER) {
0983 *val = reg_res->cpc_entry.int_value;
0984 return 0;
0985 }
0986
0987 *val = 0;
0988
0989 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
0990 u32 width = 8 << (reg->access_width - 1);
0991 u32 val_u32;
0992 acpi_status status;
0993
0994 status = acpi_os_read_port((acpi_io_address)reg->address,
0995 &val_u32, width);
0996 if (ACPI_FAILURE(status)) {
0997 pr_debug("Error: Failed to read SystemIO port %llx\n",
0998 reg->address);
0999 return -EFAULT;
1000 }
1001
1002 *val = val_u32;
1003 return 0;
1004 } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
1005 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1006 else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1007 vaddr = reg_res->sys_mem_vaddr;
1008 else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1009 return cpc_read_ffh(cpu, reg, val);
1010 else
1011 return acpi_os_read_memory((acpi_physical_address)reg->address,
1012 val, reg->bit_width);
1013
1014 switch (reg->bit_width) {
1015 case 8:
1016 *val = readb_relaxed(vaddr);
1017 break;
1018 case 16:
1019 *val = readw_relaxed(vaddr);
1020 break;
1021 case 32:
1022 *val = readl_relaxed(vaddr);
1023 break;
1024 case 64:
1025 *val = readq_relaxed(vaddr);
1026 break;
1027 default:
1028 pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
1029 reg->bit_width, pcc_ss_id);
1030 return -EFAULT;
1031 }
1032
1033 return 0;
1034 }
1035
1036 static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
1037 {
1038 int ret_val = 0;
1039 void __iomem *vaddr = NULL;
1040 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1041 struct cpc_reg *reg = ®_res->cpc_entry.reg;
1042
1043 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1044 u32 width = 8 << (reg->access_width - 1);
1045 acpi_status status;
1046
1047 status = acpi_os_write_port((acpi_io_address)reg->address,
1048 (u32)val, width);
1049 if (ACPI_FAILURE(status)) {
1050 pr_debug("Error: Failed to write SystemIO port %llx\n",
1051 reg->address);
1052 return -EFAULT;
1053 }
1054
1055 return 0;
1056 } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
1057 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1058 else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1059 vaddr = reg_res->sys_mem_vaddr;
1060 else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1061 return cpc_write_ffh(cpu, reg, val);
1062 else
1063 return acpi_os_write_memory((acpi_physical_address)reg->address,
1064 val, reg->bit_width);
1065
1066 switch (reg->bit_width) {
1067 case 8:
1068 writeb_relaxed(val, vaddr);
1069 break;
1070 case 16:
1071 writew_relaxed(val, vaddr);
1072 break;
1073 case 32:
1074 writel_relaxed(val, vaddr);
1075 break;
1076 case 64:
1077 writeq_relaxed(val, vaddr);
1078 break;
1079 default:
1080 pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
1081 reg->bit_width, pcc_ss_id);
1082 ret_val = -EFAULT;
1083 break;
1084 }
1085
1086 return ret_val;
1087 }
1088
/*
 * Read one performance register (by index) for a CPU.  Registers in PCC
 * space require taking the subspace write lock and issuing a CMD_READ
 * round trip first; everything else is read directly.
 */
static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *reg;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	reg = &cpc_desc->cpc_regs[reg_idx];

	if (CPC_IN_PCC(reg)) {
		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
		struct cppc_pcc_data *pcc_ss_data = NULL;
		int ret = 0;

		if (pcc_ss_id < 0)
			return -EIO;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
			cpc_read(cpunum, reg, perf);
		else
			ret = -EIO;

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	cpc_read(cpunum, reg, perf);

	return 0;
}
1127
1128
1129
1130
1131
1132
1133
1134
/**
 * cppc_get_desired_perf - Get the desired performance register value.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
1140
1141
1142
1143
1144
1145
1146
1147
/**
 * cppc_get_nominal_perf - Get the nominal performance register value.
 * @cpunum: CPU from which to get nominal performance.
 * @nominal_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
	return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
}
1152
1153
1154
1155
1156
1157
1158
1159
/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * If any capability register lives in PCC space, a single CMD_READ is
 * issued under the subspace write lock and all registers are then read
 * from the refreshed shared memory in one transaction.
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

	/* Are any of the regs PCC ?*/
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
		CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	if (guaranteed_reg->type != ACPI_TYPE_BUFFER  ||
	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
		perf_caps->guaranteed_perf = 0;
	} else {
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
		perf_caps->guaranteed_perf = guaranteed;
	}

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	/* A zero in any mandatory capability indicates a broken table. */
	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;


out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
1242
1243
1244
1245
1246
1247
1248
1249
/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If reference perf register is not supported then we should
	 * use the nominal perf value
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC ?*/
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if ctr_wrap_time optional register is unsupported, then the
	 * performance counters are assumed to never wrap during the lifetime of
	 * platform
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
1322
1323
1324
1325
1326
1327
1328
1329
1330
/**
 * cppc_set_enable - Set to enable CPPC on the processor by writing the
 * Continuous Performance Control package EnableRegister field.
 * @cpu: CPU for which to enable CPPC register.
 * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
 *
 * Return: 0 for success, -ERRNO or -EIO otherwise.
 */
int cppc_set_enable(int cpu, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *enable_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = -EINVAL;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -EINVAL;
	}

	enable_reg = &cpc_desc->cpc_regs[ENABLE];

	if (CPC_IN_PCC(enable_reg)) {

		if (pcc_ss_id < 0)
			return -EIO;

		/* Write the value into the PCC shared comm region first. */
		ret = cpc_write(cpu, enable_reg, enable);
		if (ret)
			return ret;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
		return ret;
	}

	return cpc_write(cpu, enable_reg, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_enable);
1367
1368
1369
1370
1371
1372
1373
1374
/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously we
	 * achieve that goal here
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_read(&pcc_ss_data->pcc_lock);	/* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update the pending_write to make sure a PCC CMD_READ will not
		 * arrive and steal the channel during the switch to write lock
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II where we transfer the ownership of PCC to Platform
	 *
	 * Short Summary: Basically if we think of a group of cppc_set_perf
	 * requests that happened in short overlapping interval. The last CPU to
	 * come out of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 * to run phase-II
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE
	 *     1. There is at-least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 * write_trylock and has already acquired the write_lock. We know for a
	 * fact it (other CPU acquiring the write_lock) couldn't have happened
	 * before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing pcc CMD_READ has stolen the
	 * down_write, in which case, send_pcc_cmd will check for pending
	 * pending_pcc_write_cmd to take care of the pending write requests.
	 *
	 * In the worst case, this CPU can be kicked out for some invalid usage
	 * of the channel, but our batched write count will still be updated so
	 * the wait below will terminate.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
1490 EXPORT_SYMBOL_GPL(cppc_set_perf);
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 * @cpu_num: CPU number for per_cpu().
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 *
 * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
 * then assume there is no latency.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values
	 * Below are definition from ACPI spec:
	 * pcc_nominal- Expected latency to process a command, in microseconds
	 * pcc_mpar   - The maximum number of periodic requests that the subspace
	 *              channel can support, reported in commands per minute. 0
	 *              indicates no limitation.
	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
	 *              completion of a command before issuing the next command,
	 *              in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
		return 0;
	else if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
	/* 60s worth of commands spread evenly -> ns between two commands. */
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);