/*
 * spu aware cpufreq governor for the cell processor
 *
 * Author: Christian Krafft <krafft@de.ibm.com>
 */
#include <linux/cpufreq.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <asm/machdep.h>
#include <asm/spu.h>

#define POLL_TIME	100000		/* in µs */
#define EXP		753		/* exp(-1) in fixed-point */

struct spu_gov_info_struct {
	unsigned long busy_spus;	/* fixed-point */
	struct cpufreq_policy *policy;
	struct delayed_work work;
	unsigned int poll_int;		/* µs */
};
static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info);

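/*
 * Smooth the number of busy SPUs on the policy's node with calc_load() and
 * scale policy->max by the resulting fixed-point load average.
 */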
static int calc_freq(struct spu_gov_info_struct *info)
{
	int cpu;
	int busy_spus;

	cpu = info->policy->cpu;
	busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus);

	info->busy_spus = calc_load(info->busy_spus, EXP, busy_spus * FIXED_1);
	pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n",
			cpu, busy_spus, info->busy_spus);

	return info->policy->max * info->busy_spus / FIXED_1;
}

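/*
 * Periodic worker: pick a new target frequency and re-arm the delayed work
 * so the governor keeps polling every poll_int microseconds.
 */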
static void spu_gov_work(struct work_struct *work)
{
	struct spu_gov_info_struct *info;
	int delay;
	unsigned long target_freq;

	info = container_of(work, struct spu_gov_info_struct, work.work);

	/* spu_gov_stop() cancels this work before clearing info->policy */
	BUG_ON(info->policy == NULL);

	target_freq = calc_freq(info);
	__cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H);

	delay = usecs_to_jiffies(info->poll_int);
	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
}

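/* Arm the deferrable polling work on the policy's CPU. */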
static void spu_gov_init_work(struct spu_gov_info_struct *info)
{
	int delay = usecs_to_jiffies(info->poll_int);

	INIT_DEFERRABLE_WORK(&info->work, spu_gov_work);
	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
}

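/* Stop the polling work and wait for any running instance to finish. */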
static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
{
	cancel_delayed_work_sync(&info->work);
}

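/*
 * Governor start: point every CPU in the policy at this policy, then start
 * the polling work on the policy's leading CPU.
 */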
static int spu_gov_start(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
	struct spu_gov_info_struct *affected_info;
	int i;

	if (!cpu_online(cpu)) {
		printk(KERN_ERR "cpu %d is not online\n", cpu);
		return -EINVAL;
	}

	if (!policy->cur) {
		printk(KERN_ERR "no cpu specified in policy\n");
		return -EINVAL;
	}

	/* initialize spu_gov_info for all affected cpus */
	for_each_cpu(i, policy->cpus) {
		affected_info = &per_cpu(spu_gov_info, i);
		affected_info->policy = policy;
	}

	info->poll_int = POLL_TIME;

	/* setup timer */
	spu_gov_init_work(info);

	return 0;
}

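/*
 * Governor stop: cancel the polling work, then detach the policy from all
 * affected CPUs.
 */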
static void spu_gov_stop(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
	int i;

	/* cancel timer */
	spu_gov_cancel_work(info);

	/* clean spu_gov_info for all affected cpus */
	for_each_cpu(i, policy->cpus) {
		info = &per_cpu(spu_gov_info, i);
		info->policy = NULL;
	}
}

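/*
 * The "spudemand" governor; cpufreq_governor_init()/cpufreq_governor_exit()
 * register and unregister it with the cpufreq core at init/exit time.
 */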
static struct cpufreq_governor spu_governor = {
	.name = "spudemand",
	.start = spu_gov_start,
	.stop = spu_gov_stop,
	.owner = THIS_MODULE,
};
cpufreq_governor_init(spu_governor);
cpufreq_governor_exit(spu_governor);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");