// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * amd-pstate.c - AMD Processor P-state Frequency Driver
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Author: Huang Rui <ray.huang@amd.com>
 *
 * AMD P-State introduces a new CPU performance scaling design for AMD
 * processors using the ACPI Collaborative Performance and Power Control
 * (CPPC) mechanism. It provides fine-grained frequency control through
 * either a full MSR solution or a shared-memory solution, selected at
 * init time via static calls.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>

#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#include "amd-pstate-trace.h"
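
/*
 * AMD_PSTATE_TRANSITION_LATENCY is in nanoseconds (0x20000 ns, about 131 us)
 * and AMD_PSTATE_TRANSITION_DELAY is in microseconds, matching the units of
 * cpuinfo.transition_latency and transition_delay_us in the cpufreq core.
 */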
#define AMD_PSTATE_TRANSITION_LATENCY	0x20000
#define AMD_PSTATE_TRANSITION_DELAY	500
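
/*
 * The shared-memory (non-MSR) CPPC solution is kept disabled by default and
 * can be enabled manually for debugging with amd_pstate.shared_mem=1.
 */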
static bool shared_mem = false;
module_param(shared_mem, bool, 0444);
MODULE_PARM_DESC(shared_mem,
	"enable amd-pstate on processors with shared memory solution (false = disabled (default), true = enabled)");

static struct cpufreq_driver amd_pstate_driver;
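
/**
 * struct amd_aperf_mperf - snapshot of the APERF, MPERF and TSC counters
 * @aperf: APERF counter value
 * @mperf: MPERF counter value
 * @tsc:   time stamp counter value
 */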
struct amd_aperf_mperf {
	u64 aperf;
	u64 mperf;
	u64 tsc;
};
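
/**
 * struct amd_cpudata - private CPU data for the amd-pstate driver
 * @cpu: CPU number
 * @req: frequency QoS requests for the min ([0]) and max ([1]) limits
 * @cppc_req_cached: cached value of the last CPPC request written
 * @highest_perf: highest performance level the processor may reach
 * @nominal_perf: maximum sustained performance level
 * @lowest_nonlinear_perf: lowest performance level at which nonlinear power
 *			   savings are achieved
 * @lowest_perf: absolute lowest performance level
 * @max_freq: frequency that maps to @highest_perf, in kHz
 * @min_freq: frequency that maps to @lowest_perf, in kHz
 * @nominal_freq: frequency that maps to @nominal_perf, in kHz
 * @lowest_nonlinear_freq: frequency that maps to @lowest_nonlinear_perf, in kHz
 * @cur: APERF/MPERF/TSC deltas between the last two samples
 * @prev: APERF/MPERF/TSC values from the previous sample
 * @freq: effective frequency of the last sample, in kHz
 * @boost_supported: whether the processor and SBIOS support boost mode
 */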
struct amd_cpudata {
	int	cpu;

	struct	freq_qos_request req[2];
	u64	cppc_req_cached;

	u32	highest_perf;
	u32	nominal_perf;
	u32	lowest_nonlinear_perf;
	u32	lowest_perf;

	u32	max_freq;
	u32	min_freq;
	u32	nominal_freq;
	u32	lowest_nonlinear_freq;

	struct amd_aperf_mperf cur;
	struct amd_aperf_mperf prev;

	u64	freq;
	bool	boost_supported;
};

static inline int pstate_enable(bool enable)
{
	return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable);
}

static int cppc_enable(bool enable)
{
	int cpu, ret = 0;

	for_each_present_cpu(cpu) {
		ret = cppc_set_enable(cpu, enable);
		if (ret)
			return ret;
	}

	return ret;
}
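
/*
 * Static calls dispatch between the full MSR solution and the ACPI CPPC
 * shared-memory solution; amd_pstate_init() rewires them to the cppc_*
 * variants when the shared-memory path is selected.
 */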
DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);

static inline int amd_pstate_enable(bool enable)
{
	return static_call(amd_pstate_enable)(enable);
}

static int pstate_init_perf(struct amd_cpudata *cpudata)
{
	u64 cap1;

	int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
				     &cap1);
	if (ret)
		return ret;
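
	/*
	 * The CPPC capability in CAP1 does not report the true highest
	 * performance level on some ASICs, so take the value from
	 * amd_get_highest_perf() instead.
	 */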
	WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());

	WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
	WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
	WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));

	return 0;
}

static int cppc_init_perf(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());

	WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
	WRITE_ONCE(cpudata->lowest_nonlinear_perf,
		   cppc_perf.lowest_nonlinear_perf);
	WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);

	return 0;
}

DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);

static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
{
	return static_call(amd_pstate_init_perf)(cpudata);
}

/*
 * pstate_update_perf() writes the request value already cached by
 * amd_pstate_update(); the individual perf arguments are only consumed by
 * the shared-memory variant below.
 */
static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
			       u32 des_perf, u32 max_perf, bool fast_switch)
{
	if (fast_switch)
		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
	else
		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
			      READ_ONCE(cpudata->cppc_req_cached));
}

static void cppc_update_perf(struct amd_cpudata *cpudata,
			     u32 min_perf, u32 des_perf,
			     u32 max_perf, bool fast_switch)
{
	struct cppc_perf_ctrls perf_ctrls;

	perf_ctrls.max_perf = max_perf;
	perf_ctrls.min_perf = min_perf;
	perf_ctrls.desired_perf = des_perf;

	cppc_set_perf(cpudata->cpu, &perf_ctrls);
}

DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);

static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
					  u32 min_perf, u32 des_perf,
					  u32 max_perf, bool fast_switch)
{
	static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
					    max_perf, fast_switch);
}
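
/*
 * amd_pstate_sample() reads APERF/MPERF/TSC and updates the effective
 * frequency of @cpudata in kHz; it returns false when the counters have
 * not advanced since the previous sample.
 */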
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
{
	u64 aperf, mperf, tsc;
	unsigned long flags;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();

	if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}

	local_irq_restore(flags);

	cpudata->cur.aperf = aperf;
	cpudata->cur.mperf = mperf;
	cpudata->cur.tsc = tsc;
	cpudata->cur.aperf -= cpudata->prev.aperf;
	cpudata->cur.mperf -= cpudata->prev.mperf;
	cpudata->cur.tsc -= cpudata->prev.tsc;

	cpudata->prev.aperf = aperf;
	cpudata->prev.mperf = mperf;
	cpudata->prev.tsc = tsc;

	cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf);

	return true;
}
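
/*
 * amd_pstate_update() packs the min/desired/max perf hints into the CPPC
 * request value and only pushes it to the hardware when the value changes.
 */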
static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
			      u32 des_perf, u32 max_perf, bool fast_switch)
{
	u64 prev = READ_ONCE(cpudata->cppc_req_cached);
	u64 value = prev;

	value &= ~AMD_CPPC_MIN_PERF(~0L);
	value |= AMD_CPPC_MIN_PERF(min_perf);

	value &= ~AMD_CPPC_DES_PERF(~0L);
	value |= AMD_CPPC_DES_PERF(des_perf);

	value &= ~AMD_CPPC_MAX_PERF(~0L);
	value |= AMD_CPPC_MAX_PERF(max_perf);

	if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
		trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
			cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
			cpudata->cpu, (value != prev), fast_switch);
	}

	if (value == prev)
		return;

	WRITE_ONCE(cpudata->cppc_req_cached, value);

	amd_pstate_update_perf(cpudata, min_perf, des_perf,
			       max_perf, fast_switch);
}

static int amd_pstate_verify(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	return 0;
}

static int amd_pstate_target(struct cpufreq_policy *policy,
			     unsigned int target_freq,
			     unsigned int relation)
{
	struct cpufreq_freqs freqs;
	struct amd_cpudata *cpudata = policy->driver_data;
	unsigned long max_perf, min_perf, des_perf, cap_perf;

	if (!cpudata->max_freq)
		return -ENODEV;

	cap_perf = READ_ONCE(cpudata->highest_perf);
	min_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
	max_perf = cap_perf;

	freqs.old = policy->cur;
	freqs.new = target_freq;

	des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
				     cpudata->max_freq);

	cpufreq_freq_transition_begin(policy, &freqs);
	amd_pstate_update(cpudata, min_perf, des_perf,
			  max_perf, false);
	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}
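
/*
 * amd_pstate_adjust_perf() is the fast-switch callback used by the schedutil
 * governor; it converts the scheduler's capacity ratios directly into CPPC
 * performance hints.
 */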
static void amd_pstate_adjust_perf(unsigned int cpu,
				   unsigned long _min_perf,
				   unsigned long target_perf,
				   unsigned long capacity)
{
	unsigned long max_perf, min_perf, des_perf,
		      cap_perf, lowest_nonlinear_perf;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct amd_cpudata *cpudata;

	if (!policy)
		return;

	cpudata = policy->driver_data;

	cap_perf = READ_ONCE(cpudata->highest_perf);
	lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);

	des_perf = cap_perf;
	if (target_perf < capacity)
		des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);

	min_perf = READ_ONCE(cpudata->highest_perf);
	if (_min_perf < capacity)
		min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);

	if (min_perf < lowest_nonlinear_perf)
		min_perf = lowest_nonlinear_perf;

	max_perf = cap_perf;
	if (max_perf < min_perf)
		max_perf = min_perf;

	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);

	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);

	/* Drop the reference taken by cpufreq_cpu_get() above */
	cpufreq_cpu_put(policy);
}

static int amd_get_min_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	/* Switch to kHz */
	return cppc_perf.lowest_freq * 1000;
}

static int amd_get_max_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 max_perf, max_freq, nominal_freq, nominal_perf;
	u64 boost_ratio;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	nominal_freq = cppc_perf.nominal_freq;
	nominal_perf = READ_ONCE(cpudata->nominal_perf);
	max_perf = READ_ONCE(cpudata->highest_perf);

	boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
			      nominal_perf);

	max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;

	/* Switch to kHz */
	return max_freq * 1000;
}

static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	/* Switch to kHz */
	return cppc_perf.nominal_freq * 1000;
}

static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
	    nominal_freq, nominal_perf;
	u64 lowest_nonlinear_ratio;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	nominal_freq = cppc_perf.nominal_freq;
	nominal_perf = READ_ONCE(cpudata->nominal_perf);

	lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;

	lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
					 nominal_perf);

	lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;

	/* Switch to kHz */
	return lowest_nonlinear_freq * 1000;
}

static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	int ret;

	if (!cpudata->boost_supported) {
		pr_err("Boost mode is not supported by this processor or SBIOS\n");
		return -EINVAL;
	}

	if (state)
		policy->cpuinfo.max_freq = cpudata->max_freq;
	else
		policy->cpuinfo.max_freq = cpudata->nominal_freq;

	policy->max = policy->cpuinfo.max_freq;

	ret = freq_qos_update_request(&cpudata->req[1],
				      policy->cpuinfo.max_freq);
	if (ret < 0)
		return ret;

	return 0;
}

static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
{
	u32 highest_perf, nominal_perf;

	highest_perf = READ_ONCE(cpudata->highest_perf);
	nominal_perf = READ_ONCE(cpudata->nominal_perf);

	if (highest_perf <= nominal_perf)
		return;

	cpudata->boost_supported = true;
	amd_pstate_driver.boost_enabled = true;
}

static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
	struct device *dev;
	struct amd_cpudata *cpudata;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
	if (!cpudata)
		return -ENOMEM;

	cpudata->cpu = policy->cpu;

	ret = amd_pstate_init_perf(cpudata);
	if (ret)
		goto free_cpudata1;

	min_freq = amd_get_min_freq(cpudata);
	max_freq = amd_get_max_freq(cpudata);
	nominal_freq = amd_get_nominal_freq(cpudata);
	lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);

	if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
		dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
			min_freq, max_freq);
		ret = -EINVAL;
		goto free_cpudata1;
	}

	policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
	policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;

	policy->min = min_freq;
	policy->max = max_freq;

	policy->cpuinfo.min_freq = min_freq;
	policy->cpuinfo.max_freq = max_freq;

	/* It will be updated by the governor */
	policy->cur = policy->cpuinfo.min_freq;

	if (boot_cpu_has(X86_FEATURE_CPPC))
		policy->fast_switch_possible = true;

	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
				   FREQ_QOS_MIN, policy->cpuinfo.min_freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
		goto free_cpudata1;
	}

	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1],
				   FREQ_QOS_MAX, policy->cpuinfo.max_freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
		goto free_cpudata2;
	}

	/* Initial processor data capability frequencies */
	cpudata->max_freq = max_freq;
	cpudata->min_freq = min_freq;
	cpudata->nominal_freq = nominal_freq;
	cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;

	policy->driver_data = cpudata;

	amd_pstate_boost_init(cpudata);

	return 0;

free_cpudata2:
	freq_qos_remove_request(&cpudata->req[0]);
free_cpudata1:
	kfree(cpudata);
	return ret;
}

static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata;

	cpudata = policy->driver_data;

	freq_qos_remove_request(&cpudata->req[1]);
	freq_qos_remove_request(&cpudata->req[0]);
	kfree(cpudata);

	return 0;
}

static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
{
	int ret;

	ret = amd_pstate_enable(true);
	if (ret)
		pr_err("failed to enable amd-pstate during resume, return %d\n", ret);

	return ret;
}

static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
{
	int ret;

	ret = amd_pstate_enable(false);
	if (ret)
		pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);

	return ret;
}
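
/* Sysfs attributes */

/*
 * This frequency reports the maximum hardware frequency: when boost is
 * supported but currently disabled, it is larger than cpuinfo.max_freq.
 */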
static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
					char *buf)
{
	int max_freq;
	struct amd_cpudata *cpudata;

	cpudata = policy->driver_data;

	max_freq = amd_get_max_freq(cpudata);
	if (max_freq < 0)
		return max_freq;

	return sprintf(&buf[0], "%u\n", max_freq);
}

static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
						     char *buf)
{
	int freq;
	struct amd_cpudata *cpudata;

	cpudata = policy->driver_data;

	freq = amd_get_lowest_nonlinear_freq(cpudata);
	if (freq < 0)
		return freq;

	return sprintf(&buf[0], "%u\n", freq);
}
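
/*
 * The highest_perf value shown here is taken from amd_get_highest_perf()
 * rather than from the _CPC table, since the table entry does not reflect
 * the true highest performance level on some ASICs.
 */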
static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
					    char *buf)
{
	u32 perf;
	struct amd_cpudata *cpudata = policy->driver_data;

	perf = READ_ONCE(cpudata->highest_perf);

	return sprintf(&buf[0], "%u\n", perf);
}

cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);

cpufreq_freq_attr_ro(amd_pstate_highest_perf);

static struct freq_attr *amd_pstate_attr[] = {
	&amd_pstate_max_freq,
	&amd_pstate_lowest_nonlinear_freq,
	&amd_pstate_highest_perf,
	NULL,
};

static struct cpufreq_driver amd_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
	.verify		= amd_pstate_verify,
	.target		= amd_pstate_target,
	.init		= amd_pstate_cpu_init,
	.exit		= amd_pstate_cpu_exit,
	.suspend	= amd_pstate_cpu_suspend,
	.resume		= amd_pstate_cpu_resume,
	.set_boost	= amd_pstate_set_boost,
	.name		= "amd-pstate",
	.attr		= amd_pstate_attr,
};

static int __init amd_pstate_init(void)
{
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;

	if (!acpi_cpc_valid()) {
		pr_debug("the _CPC object is not present in SBIOS\n");
		return -ENODEV;
	}

	/* Don't keep reloading if another cpufreq driver is already in use */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	/* Capability check: prefer the full MSR solution when available */
	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		pr_debug("AMD CPPC MSR based functionality is supported\n");
		amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
	} else if (shared_mem) {
		static_call_update(amd_pstate_enable, cppc_enable);
		static_call_update(amd_pstate_init_perf, cppc_init_perf);
		static_call_update(amd_pstate_update_perf, cppc_update_perf);
	} else {
		pr_info("This processor supports the shared memory solution, you can enable it with amd_pstate.shared_mem=1\n");
		return -ENODEV;
	}

	/* Enable the amd-pstate hardware interface */
	ret = amd_pstate_enable(true);
	if (ret) {
		pr_err("failed to enable amd-pstate with return %d\n", ret);
		return ret;
	}

	ret = cpufreq_register_driver(&amd_pstate_driver);
	if (ret)
		pr_err("failed to register amd_pstate_driver with return %d\n",
		       ret);

	return ret;
}

static void __exit amd_pstate_exit(void)
{
	cpufreq_unregister_driver(&amd_pstate_driver);

	amd_pstate_enable(false);
}

module_init(amd_pstate_init);
module_exit(amd_pstate_exit);

MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
MODULE_LICENSE("GPL");