/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/cpufreq.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/time.h>
#include <asm/cevt-r4k.h>

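/*
 * Program CP0_Compare to CP0_Count + delta.  If CP0_Count has already
 * reached the new CP0_Compare by the time it is re-read, the event was
 * set too close and -ETIME tells the clockevents core to retry.
 */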
static int mips_next_event(unsigned long delta,
               struct clock_event_device *evt)
{
    unsigned int cnt;
    int res;

    cnt = read_c0_count();
    cnt += delta;
    write_c0_compare(cnt);
    res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
    return res;
}

/**
 * calculate_min_delta() - Calculate a good minimum delta for mips_next_event().
 *
 * Running under virtualisation can introduce overhead into mips_next_event() in
 * the form of hypervisor emulation of CP0_Count/CP0_Compare registers,
 * potentially with an unnatural frequency, which makes a fixed min_delta_ns
 * value inappropriate as it may be too small.
 *
 * It can also introduce occasional latency from the guest being descheduled.
 *
 * This function calculates a good minimum delta based roughly on the 75th
 * percentile of the time taken to do the mips_next_event() sequence, in order
 * to handle potentially higher overhead while also eliminating outliers due to
 * unpredictable hypervisor latency (which can be handled by retries).
 *
 * Return:  An appropriate minimum delta for the clock event device.
 */
static unsigned int calculate_min_delta(void)
{
    unsigned int cnt, i, j, k, l;
    unsigned int buf1[4], buf2[3];
    unsigned int min_delta;

    /*
     * Calculate the median of 5 75th percentiles of 5 samples of how long
     * it takes to set CP0_Compare = CP0_Count + delta.
     */
    for (i = 0; i < 5; ++i) {
        for (j = 0; j < 5; ++j) {
            /*
             * This is like the code in mips_next_event(), and
             * directly measures the borderline "safe" delta.
             */
            cnt = read_c0_count();
            write_c0_compare(cnt);
            cnt = read_c0_count() - cnt;

            /* Sorted insert into buf1 */
            for (k = 0; k < j; ++k) {
                if (cnt < buf1[k]) {
                    l = min_t(unsigned int,
                          j, ARRAY_SIZE(buf1) - 1);
                    for (; l > k; --l)
                        buf1[l] = buf1[l - 1];
                    break;
                }
            }
            if (k < ARRAY_SIZE(buf1))
                buf1[k] = cnt;
        }

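        /*
         * buf1 now holds the four smallest of the five samples in
         * ascending order, so buf1[ARRAY_SIZE(buf1) - 1] is the
         * 75th percentile estimate for this round.
         */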
        /* Sorted insert of 75th percentile into buf2 */
        for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
            if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
                l = min_t(unsigned int,
                      i, ARRAY_SIZE(buf2) - 1);
                for (; l > k; --l)
                    buf2[l] = buf2[l - 1];
                break;
            }
        }
        if (k < ARRAY_SIZE(buf2))
            buf2[k] = buf1[ARRAY_SIZE(buf1) - 1];
    }

    /*
     * buf2 now holds the three smallest of the five 75th percentile
     * estimates, so its last element is their median.  Use twice the
     * median to leave some headroom.
     */
    min_delta = buf2[ARRAY_SIZE(buf2) - 1] * 2;

    /* Don't go too low */
    if (min_delta < 0x300)
        min_delta = 0x300;

    pr_debug("%s: median 75th percentile=%#x, min_delta=%#x\n",
         __func__, buf2[ARRAY_SIZE(buf2) - 1], min_delta);
    return min_delta;
}

DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
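/* Non-zero once the shared timer IRQ handler has been requested */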
int cp0_timer_irq_installed;

/*
 * Possibly handle a performance counter interrupt.
 * Return true if the timer interrupt should not be checked
 */
static inline int handle_perf_irq(int r2)
{
    /*
     * The performance counter overflow interrupt may be shared with the
     * timer interrupt (cp0_perfcount_irq < 0). If it is and a
     * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
     * and we can't reliably determine if a counter interrupt has also
     * happened (!r2) then don't check for a timer interrupt.
     */
    return (cp0_perfcount_irq < 0) &&
        perf_irq() == IRQ_HANDLED &&
        !r2;
}

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
    const int r2 = cpu_has_mips_r2_r6;
    struct clock_event_device *cd;
    int cpu = smp_processor_id();

    /*
     * Suckage alert:
     * Before R2 of the architecture there was no way to see if a
     * performance counter interrupt was pending, so we have to run
     * the performance counter interrupt handler anyway.
     */
    if (handle_perf_irq(r2))
        return IRQ_HANDLED;

    /*
     * The same applies to the timer interrupt itself: before R2 there
     * is no Cause.TI bit, so we cannot tell whether one is pending.
     * But with the above we now know that the reason we got here must
     * be a timer interrupt.  Being the paranoiacs we are we check
     * anyway.
     */
    if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
        /* Clear Count/Compare Interrupt */
        write_c0_compare(read_c0_compare());
        cd = &per_cpu(mips_clockevent_device, cpu);
        cd->event_handler(cd);

        return IRQ_HANDLED;
    }

    return IRQ_NONE;
}

struct irqaction c0_compare_irqaction = {
    .handler = c0_compare_interrupt,
    /*
     * IRQF_SHARED: The timer interrupt may be shared with other interrupts
     * such as perf counter and FDC interrupts.
     */
    .flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED,
    .name = "timer",
};
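/*
 * No-op placeholder; the clockevents core installs the real event
 * handler once the device is registered and put into use.
 */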
void mips_event_handler(struct clock_event_device *dev)
{
}

/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
static int c0_compare_int_pending(void)
{
    /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
    return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
}

/*
 * Compare interrupt can be routed and latched outside the core,
 * so wait up to worst case number of cycle counter ticks for timer interrupt
 * changes to propagate to the cause register.
 */
#define COMPARE_INT_SEEN_TICKS 50

int c0_compare_int_usable(void)
{
    unsigned int delta;
    unsigned int cnt;

    /*
     * IP7 already pending?  Try to clear it by acking the timer.
     */
    if (c0_compare_int_pending()) {
        cnt = read_c0_count();
        write_c0_compare(cnt);
        back_to_back_c0_hazard();
        while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
            if (!c0_compare_int_pending())
                break;
        if (c0_compare_int_pending())
            return 0;
    }

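    /*
     * Probe with exponentially growing deltas until CP0_Compare can be
     * written before CP0_Count catches up with it, i.e. until the event
     * actually lands in the future.
     */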
    for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
        cnt = read_c0_count();
        cnt += delta;
        write_c0_compare(cnt);
        back_to_back_c0_hazard();
        if ((int)(read_c0_count() - cnt) < 0)
            break;
        /* increase delta if the timer was already expired */
    }

    while ((int)(read_c0_count() - cnt) <= 0)
        ;   /* Wait for expiry */

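    /* The deadline has passed; the interrupt should now become pending. */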
    while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
        if (c0_compare_int_pending())
            break;
    if (!c0_compare_int_pending())
        return 0;
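
    /* Acknowledge the interrupt and check that the pending bit clears. */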
    cnt = read_c0_count();
    write_c0_compare(cnt);
    back_to_back_c0_hazard();
    while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
        if (!c0_compare_int_pending())
            break;
    if (c0_compare_int_pending())
        return 0;

    /*
     * Feels like a real count / compare timer.
     */
    return 1;
}

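/*
 * Weak default; platforms that route the compare interrupt differently
 * can override this to return the IRQ number they actually use.
 */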
unsigned int __weak get_c0_compare_int(void)
{
    return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
}

#ifdef CONFIG_CPU_FREQ

static unsigned long mips_ref_freq;

static int r4k_cpufreq_callback(struct notifier_block *nb,
                unsigned long val, void *data)
{
    struct cpufreq_freqs *freq = data;
    struct clock_event_device *cd;
    unsigned long rate;
    int cpu;

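    /* Latch the first pre-change frequency as the scaling reference */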
    if (!mips_ref_freq)
        mips_ref_freq = freq->old;

    if (val == CPUFREQ_POSTCHANGE) {
        rate = cpufreq_scale(mips_hpt_frequency, mips_ref_freq,
                     freq->new);

        for_each_cpu(cpu, freq->policy->cpus) {
            cd = &per_cpu(mips_clockevent_device, cpu);

            clockevents_update_freq(cd, rate);
        }
    }

    return 0;
}

static struct notifier_block r4k_cpufreq_notifier = {
    .notifier_call  = r4k_cpufreq_callback,
};

static int __init r4k_register_cpufreq_notifier(void)
{
    return cpufreq_register_notifier(&r4k_cpufreq_notifier,
                     CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(r4k_register_cpufreq_notifier);

#endif /* CONFIG_CPU_FREQ */

int r4k_clockevent_init(void)
{
    unsigned long flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED;
    unsigned int cpu = smp_processor_id();
    struct clock_event_device *cd;
    unsigned int irq, min_delta;

    if (!cpu_has_counter || !mips_hpt_frequency)
        return -ENXIO;

    if (!c0_compare_int_usable())
        return -ENXIO;

    /*
     * With vectored interrupts things are getting platform specific.
     * get_c0_compare_int is a hook to allow a platform to return the
     * interrupt number of its liking.
     */
    irq = get_c0_compare_int();

    cd = &per_cpu(mips_clockevent_device, cpu);

    cd->name        = "MIPS";
    cd->features        = CLOCK_EVT_FEAT_ONESHOT |
                  CLOCK_EVT_FEAT_C3STOP |
                  CLOCK_EVT_FEAT_PERCPU;

    min_delta       = calculate_min_delta();

    cd->rating      = 300;
    cd->irq         = irq;
    cd->cpumask     = cpumask_of(cpu);
    cd->set_next_event  = mips_next_event;
    cd->event_handler   = mips_event_handler;

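    /*
     * The signed 32-bit comparison in mips_next_event() caps the largest
     * usable delta at 0x7fffffff count ticks.
     */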
    clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff);

    if (cp0_timer_irq_installed)
        return 0;

    cp0_timer_irq_installed = 1;

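    /*
     * The IRQ may be shared, so pass the handler itself as a non-NULL
     * dev_id cookie; only the first CPU to get here requests it.
     */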
    if (request_irq(irq, c0_compare_interrupt, flags, "timer",
            c0_compare_interrupt))
        pr_err("Failed to request irq %d (timer)\n", irq);

    return 0;
}