// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/units.h>

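/*
 * The hardware exposes a frequency lookup table (LUT); each row encodes the
 * clock source, the PLL L-value (a multiple of the XO rate), the number of
 * cores the entry applies to, and a voltage in mV.
 */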
#define LUT_MAX_ENTRIES			40U
#define LUT_SRC				GENMASK(31, 30)
#define LUT_L_VAL			GENMASK(7, 0)
#define LUT_CORE_COUNT			GENMASK(18, 16)
#define LUT_VOLT			GENMASK(11, 0)
#define CLK_HW_DIV			2
#define LUT_TURBO_IND			1

#define GT_IRQ_STATUS			BIT(2)

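/* Per-SoC register offsets; the two supported hardware blocks lay these out differently */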
struct qcom_cpufreq_soc_data {
	u32 reg_enable;
	u32 reg_domain_state;
	u32 reg_dcvs_ctrl;
	u32 reg_freq_lut;
	u32 reg_volt_lut;
	u32 reg_intr_clr;
	u32 reg_current_vote;
	u32 reg_perf_state;
	u8 lut_row_size;
};

struct qcom_cpufreq_data {
	void __iomem *base;
	struct resource *res;
	const struct qcom_cpufreq_soc_data *soc_data;

	/*
	 * Mutex to synchronize between de-init sequence and re-starting LMh
	 * polling/interrupts
	 */
	struct mutex throttle_lock;
	int throttle_irq;
	char irq_name[15];
	bool cancel_throttle;
	struct delayed_work throttle_work;
	struct cpufreq_policy *policy;

	bool per_core_dcvs;
};

/* Rates of the "xo" and "alternate" reference clocks, read at probe time */
static unsigned long cpu_hw_rate, xo_rate;
static bool icc_scaling_enabled;

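/* Look up the exact OPP for @freq_khz and apply it to scale interconnect bandwidth */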
static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
			       unsigned long freq_khz)
{
	unsigned long freq_hz = freq_khz * 1000;
	struct dev_pm_opp *opp;
	struct device *dev;
	int ret;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);
	return ret;
}

static int qcom_cpufreq_update_opp(struct device *cpu_dev,
				   unsigned long freq_khz,
				   unsigned long volt)
{
	unsigned long freq_hz = freq_khz * 1000;
	int ret;

	/* Skip voltage update if the opp table is not available */
	if (!icc_scaling_enabled)
		return dev_pm_opp_add(cpu_dev, freq_hz, volt);

	ret = dev_pm_opp_adjust_voltage(cpu_dev, freq_hz, volt, volt, volt);
	if (ret) {
		dev_err(cpu_dev, "Voltage update failed freq=%ld\n", freq_khz);
		return ret;
	}

	return dev_pm_opp_enable(cpu_dev, freq_hz);
}

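/*
 * Request a performance state by writing the LUT index; with per-core DCVS
 * the index is mirrored to a per-CPU register for every related CPU.
 */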
static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
					unsigned int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
	unsigned long freq = policy->freq_table[index].frequency;
	unsigned int i;

	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	if (data->per_core_dcvs)
		for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
			writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);

	if (icc_scaling_enabled)
		qcom_cpufreq_set_bw(policy, freq);

	return 0;
}

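/* Read back the currently programmed LUT index and translate it to kHz */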
static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
	struct qcom_cpufreq_data *data;
	const struct qcom_cpufreq_soc_data *soc_data;
	struct cpufreq_policy *policy;
	unsigned int index;

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy)
		return 0;

	data = policy->driver_data;
	soc_data = data->soc_data;

	index = readl_relaxed(data->base + soc_data->reg_perf_state);
	index = min(index, LUT_MAX_ENTRIES - 1);

	return policy->freq_table[index].frequency;
}

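/* Fast switching only pokes the hardware; it is offered only when interconnect scaling is off */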
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
						unsigned int target_freq)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
	unsigned int index;
	unsigned int i;

	index = policy->cached_resolved_idx;
	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	if (data->per_core_dcvs)
		for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
			writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);

	return policy->freq_table[index].frequency;
}

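/*
 * Build the cpufreq frequency table by walking the hardware LUT. A repeated
 * frequency marks the end of the table; a repeated entry whose predecessor
 * was invalidated is treated as a boost (turbo) frequency instead.
 */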
static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
				    struct cpufreq_policy *policy)
{
	u32 data, src, lval, i, core_count, prev_freq = 0, freq;
	u32 volt;
	struct cpufreq_frequency_table *table;
	struct dev_pm_opp *opp;
	unsigned long rate;
	int ret;
	struct qcom_cpufreq_data *drv_data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = drv_data->soc_data;

	table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (!ret) {
		/* Disable all opps and cross-validate against LUT later */
		icc_scaling_enabled = true;
		for (rate = 0; ; rate++) {
			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
			if (IS_ERR(opp))
				break;

			dev_pm_opp_put(opp);
			dev_pm_opp_disable(cpu_dev, rate);
		}
	} else if (ret != -ENODEV) {
		dev_err(cpu_dev, "Invalid opp table in device tree\n");
		kfree(table);
		return ret;
	} else {
		policy->fast_switch_possible = true;
		icc_scaling_enabled = false;
	}

	for (i = 0; i < LUT_MAX_ENTRIES; i++) {
		data = readl_relaxed(drv_data->base + soc_data->reg_freq_lut +
				     i * soc_data->lut_row_size);
		src = FIELD_GET(LUT_SRC, data);
		lval = FIELD_GET(LUT_L_VAL, data);
		core_count = FIELD_GET(LUT_CORE_COUNT, data);

		data = readl_relaxed(drv_data->base + soc_data->reg_volt_lut +
				     i * soc_data->lut_row_size);
		volt = FIELD_GET(LUT_VOLT, data) * 1000;

		if (src)
			freq = xo_rate * lval / 1000;
		else
			freq = cpu_hw_rate / 1000;

		if (freq != prev_freq && core_count != LUT_TURBO_IND) {
			if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
				table[i].frequency = freq;
				dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
					freq, core_count);
			} else {
				dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
				table[i].frequency = CPUFREQ_ENTRY_INVALID;
			}

		} else if (core_count == LUT_TURBO_IND) {
			table[i].frequency = CPUFREQ_ENTRY_INVALID;
		}

		/*
		 * Two of the same frequencies with the same core counts means
		 * end of table
		 */
		if (i > 0 && prev_freq == freq) {
			struct cpufreq_frequency_table *prev = &table[i - 1];

			/*
			 * Only treat the last frequency that might be a boost
			 * as the boost frequency
			 */
			if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
				if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
					prev->frequency = prev_freq;
					prev->flags = CPUFREQ_BOOST_FREQ;
				} else {
					dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
						 freq);
				}
			}

			break;
		}

		prev_freq = freq;
	}

	table[i].frequency = CPUFREQ_TABLE_END;
	policy->freq_table = table;
	dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);

	return 0;
}

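/* Collect all CPUs whose "qcom,freq-domain" phandle points at this frequency domain */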
static void qcom_get_related_cpus(int index, struct cpumask *m)
{
	struct device_node *cpu_np;
	struct of_phandle_args args;
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		cpu_np = of_cpu_device_node_get(cpu);
		if (!cpu_np)
			continue;

		ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
						 "#freq-domain-cells", 0,
						 &args);
		of_node_put(cpu_np);
		if (ret < 0)
			continue;

		if (index == args.args[0])
			cpumask_set_cpu(cpu, m);
	}
}

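/* Read the throttled L-value from the LMh status register and scale it by the XO rate */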
static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
{
	unsigned int lval;

	if (data->soc_data->reg_current_vote)
		lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
	else
		lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;

	return lval * xo_rate;
}

static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
	struct cpufreq_policy *policy = data->policy;
	int cpu = cpumask_first(policy->related_cpus);
	struct device *dev = get_cpu_device(cpu);
	unsigned long freq_hz, throttled_freq;
	struct dev_pm_opp *opp;

	/*
	 * Get the h/w throttled frequency, normalize it using the
	 * registered opp table and use it to calculate thermal pressure.
	 */
	freq_hz = qcom_lmh_get_throttle_freq(data);

	opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
	if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
		opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);

	if (IS_ERR(opp))
		dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
	else
		dev_pm_opp_put(opp);

	/*
	 * Compute the throttled frequency even when no OPP matched, so the
	 * comparison below never reads an uninitialized value.
	 */
	throttled_freq = freq_hz / HZ_PER_KHZ;

	/* Update thermal pressure (the boost frequencies are accepted) */
	arch_update_thermal_pressure(policy->related_cpus, throttled_freq);

	/*
	 * In the unlikely case policy is unregistered do not enable
	 * polling or h/w interrupt
	 */
	mutex_lock(&data->throttle_lock);
	if (data->cancel_throttle)
		goto out;

	/*
	 * If h/w throttled frequency is higher than what cpufreq has requested
	 * for, then stop polling and switch back to interrupt mechanism.
	 */
	if (throttled_freq >= qcom_cpufreq_hw_get(cpu))
		enable_irq(data->throttle_irq);
	else
		mod_delayed_work(system_highpri_wq, &data->throttle_work,
				 msecs_to_jiffies(10));

out:
	mutex_unlock(&data->throttle_lock);
}

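/* Deferred-work path: keep polling the throttle state while the IRQ stays masked */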
static void qcom_lmh_dcvs_poll(struct work_struct *work)
{
	struct qcom_cpufreq_data *data;

	data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
	qcom_lmh_dcvs_notify(data);
}

static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
{
	struct qcom_cpufreq_data *c_data = data;

	/* Disable interrupt and enable polling */
	disable_irq_nosync(c_data->throttle_irq);
	schedule_delayed_work(&c_data->throttle_work, 0);

	if (c_data->soc_data->reg_intr_clr)
		writel_relaxed(GT_IRQ_STATUS,
			       c_data->base + c_data->soc_data->reg_intr_clr);

	return IRQ_HANDLED;
}

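/* qcom_soc_data matches the older OSM block, epss_soc_data the newer EPSS block */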
static const struct qcom_cpufreq_soc_data qcom_soc_data = {
	.reg_enable = 0x0,
	.reg_dcvs_ctrl = 0xbc,
	.reg_freq_lut = 0x110,
	.reg_volt_lut = 0x114,
	.reg_current_vote = 0x704,
	.reg_perf_state = 0x920,
	.lut_row_size = 32,
};

static const struct qcom_cpufreq_soc_data epss_soc_data = {
	.reg_enable = 0x0,
	.reg_domain_state = 0x20,
	.reg_dcvs_ctrl = 0xb0,
	.reg_freq_lut = 0x100,
	.reg_volt_lut = 0x200,
	.reg_intr_clr = 0x308,
	.reg_perf_state = 0x320,
	.lut_row_size = 4,
};

static const struct of_device_id qcom_cpufreq_hw_match[] = {
	{ .compatible = "qcom,cpufreq-hw", .data = &qcom_soc_data },
	{ .compatible = "qcom,cpufreq-epss", .data = &epss_soc_data },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);

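/* Wire up the optional LMh throttle interrupt for this frequency domain */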
static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	/*
	 * Look for LMh interrupt. If no interrupt line is specified /
	 * if there is an error, allow cpufreq to be enabled as usual.
	 */
	data->throttle_irq = platform_get_irq_optional(pdev, index);
	if (data->throttle_irq == -ENXIO)
		return 0;
	if (data->throttle_irq < 0)
		return data->throttle_irq;

	data->cancel_throttle = false;
	data->policy = policy;

	mutex_init(&data->throttle_lock);
	INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);

	snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
	ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
				   IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data);
	if (ret) {
		dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
		return 0;
	}

	ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return 0;
}

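/* Re-arm throttle handling when a CPU in the domain comes back online */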
static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	if (data->throttle_irq <= 0)
		return 0;

	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = false;
	mutex_unlock(&data->throttle_lock);

	ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return ret;
}

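/* Quiesce throttle polling and the IRQ before the domain's last CPU goes down */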
static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;

	if (data->throttle_irq <= 0)
		return 0;

	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = true;
	mutex_unlock(&data->throttle_lock);

	cancel_delayed_work_sync(&data->throttle_work);
	irq_set_affinity_and_hint(data->throttle_irq, NULL);
	disable_irq_nosync(data->throttle_irq);

	return 0;
}

static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
	if (data->throttle_irq <= 0)
		return;

	free_irq(data->throttle_irq, data);
}

static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
	struct platform_device *pdev = cpufreq_get_driver_data();
	struct device *dev = &pdev->dev;
	struct of_phandle_args args;
	struct device_node *cpu_np;
	struct device *cpu_dev;
	struct resource *res;
	void __iomem *base;
	struct qcom_cpufreq_data *data;
	int ret, index;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	cpu_np = of_cpu_device_node_get(policy->cpu);
	if (!cpu_np)
		return -EINVAL;

	ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
					 "#freq-domain-cells", 0, &args);
	of_node_put(cpu_np);
	if (ret)
		return ret;

	index = args.args[0];

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	if (!res) {
		dev_err(dev, "failed to get mem resource %d\n", index);
		return -ENODEV;
	}

	if (!request_mem_region(res->start, resource_size(res), res->name)) {
		dev_err(dev, "failed to request resource %pR\n", res);
		return -EBUSY;
	}

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		dev_err(dev, "failed to map resource %pR\n", res);
		ret = -ENOMEM;
		goto release_region;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto unmap_base;
	}

	data->soc_data = of_device_get_match_data(&pdev->dev);
	data->base = base;
	data->res = res;

	/* HW should be in enabled state to proceed */
	if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
		dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
		ret = -ENODEV;
		goto error;
	}

	if (readl_relaxed(base + data->soc_data->reg_dcvs_ctrl) & 0x1)
		data->per_core_dcvs = true;

	qcom_get_related_cpus(index, policy->cpus);
	if (cpumask_empty(policy->cpus)) {
		dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
		ret = -ENOENT;
		goto error;
	}

	policy->driver_data = data;
	policy->dvfs_possible_from_any_cpu = true;

	ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
	if (ret) {
		dev_err(dev, "Domain-%d failed to read LUT\n", index);
		goto error;
	}

	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_err(cpu_dev, "Failed to add OPPs\n");
		ret = -ENODEV;
		goto error;
	}

	if (policy_has_boost_freq(policy)) {
		ret = cpufreq_enable_boost_support();
		if (ret)
			dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
	}

	ret = qcom_cpufreq_hw_lmh_init(policy, index);
	if (ret)
		goto error;

	return 0;
error:
	kfree(data);
unmap_base:
	iounmap(base);
release_region:
	release_mem_region(res->start, resource_size(res));
	return ret;
}

static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct resource *res = data->res;
	void __iomem *base = data->base;

	dev_pm_opp_remove_all_dynamic(cpu_dev);
	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
	qcom_cpufreq_hw_lmh_exit(data);
	kfree(policy->freq_table);
	kfree(data);
	iounmap(base);
	release_mem_region(res->start, resource_size(res));

	return 0;
}

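/* The IRQ is requested with IRQF_NO_AUTOEN; enable it once the policy is fully set up */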
static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;

	if (data->throttle_irq >= 0)
		enable_irq(data->throttle_irq);
}

static struct freq_attr *qcom_cpufreq_hw_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_scaling_boost_freqs,
	NULL
};

static struct cpufreq_driver cpufreq_qcom_hw_driver = {
	.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
		 CPUFREQ_IS_COOLING_DEV,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = qcom_cpufreq_hw_target_index,
	.get = qcom_cpufreq_hw_get,
	.init = qcom_cpufreq_hw_cpu_init,
	.exit = qcom_cpufreq_hw_cpu_exit,
	.online = qcom_cpufreq_hw_cpu_online,
	.offline = qcom_cpufreq_hw_cpu_offline,
	.register_em = cpufreq_register_em_with_opp,
	.fast_switch = qcom_cpufreq_hw_fast_switch,
	.name = "qcom-cpufreq-hw",
	.attr = qcom_cpufreq_hw_attr,
	.ready = qcom_cpufreq_ready,
};

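/* Cache the reference clock rates used to decode the LUT, then register the driver */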
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
	struct device *cpu_dev;
	struct clk *clk;
	int ret;

	clk = clk_get(&pdev->dev, "xo");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	xo_rate = clk_get_rate(clk);
	clk_put(clk);

	clk = clk_get(&pdev->dev, "alternate");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
	clk_put(clk);

	cpufreq_qcom_hw_driver.driver_data = pdev;

	/* Check for optional interconnect paths on CPU0 */
	cpu_dev = get_cpu_device(0);
	if (!cpu_dev)
		return -EPROBE_DEFER;

	ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
	if (ret)
		return ret;

	ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
	if (ret)
		dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
	else
		dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");

	return ret;
}

static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
	return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
}

static struct platform_driver qcom_cpufreq_hw_driver = {
	.probe = qcom_cpufreq_hw_driver_probe,
	.remove = qcom_cpufreq_hw_driver_remove,
	.driver = {
		.name = "qcom-cpufreq-hw",
		.of_match_table = qcom_cpufreq_hw_match,
	},
};

static int __init qcom_cpufreq_hw_init(void)
{
	return platform_driver_register(&qcom_cpufreq_hw_driver);
}
postcore_initcall(qcom_cpufreq_hw_init);

static void __exit qcom_cpufreq_hw_exit(void)
{
	platform_driver_unregister(&qcom_cpufreq_hw_driver);
}
module_exit(qcom_cpufreq_hw_exit);

MODULE_DESCRIPTION("QCOM CPUFREQ HW Driver");
MODULE_LICENSE("GPL v2");