0001 // SPDX-License-Identifier: GPL-2.0
0002 /* time.c: UltraSparc timer and TOD clock support.
0003  *
0004  * Copyright (C) 1997, 2008 David S. Miller (davem@davemloft.net)
0005  * Copyright (C) 1998 Eddie C. Dost   (ecd@skynet.be)
0006  *
0007  * Based largely on code which is:
0008  *
0009  * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
0010  */
0011 
0012 #include <linux/errno.h>
0013 #include <linux/export.h>
0014 #include <linux/sched.h>
0015 #include <linux/kernel.h>
0016 #include <linux/param.h>
0017 #include <linux/string.h>
0018 #include <linux/mm.h>
0019 #include <linux/interrupt.h>
0020 #include <linux/time.h>
0021 #include <linux/timex.h>
0022 #include <linux/init.h>
0023 #include <linux/ioport.h>
0024 #include <linux/mc146818rtc.h>
0025 #include <linux/delay.h>
0026 #include <linux/profile.h>
0027 #include <linux/bcd.h>
0028 #include <linux/jiffies.h>
0029 #include <linux/cpufreq.h>
0030 #include <linux/percpu.h>
0031 #include <linux/rtc/m48t59.h>
0032 #include <linux/kernel_stat.h>
0033 #include <linux/clockchips.h>
0034 #include <linux/clocksource.h>
0035 #include <linux/platform_device.h>
0036 #include <linux/ftrace.h>
0037 
0038 #include <asm/oplib.h>
0039 #include <asm/timer.h>
0040 #include <asm/irq.h>
0041 #include <asm/io.h>
0042 #include <asm/prom.h>
0043 #include <asm/starfire.h>
0044 #include <asm/smp.h>
0045 #include <asm/sections.h>
0046 #include <asm/cpudata.h>
0047 #include <linux/uaccess.h>
0048 #include <asm/irq_regs.h>
0049 #include <asm/cacheflush.h>
0050 
0051 #include "entry.h"
0052 #include "kernel.h"
0053 
0054 DEFINE_SPINLOCK(rtc_lock);
0055 
0056 #ifdef CONFIG_SMP
0057 unsigned long profile_pc(struct pt_regs *regs)
0058 {
0059     unsigned long pc = instruction_pointer(regs);
0060 
0061     if (in_lock_functions(pc))
0062         return regs->u_regs[UREG_RETPC];
0063     return pc;
0064 }
0065 EXPORT_SYMBOL(profile_pc);
0066 #endif
0067 
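/* Three tick-source backends are implemented below and wrapped in
 * struct sparc64_tick_ops: the plain %tick register, the system tick
 * (%stick, accessed as %asr24/%asr25), and the Hummingbird variant
 * whose STICK registers live in I/O space.  time_init_early() selects
 * the appropriate one at boot.
 */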
0068 static void tick_disable_protection(void)
0069 {
0070     /* Set things up so the user can access the tick register for
0071      * profiling purposes.  Also work around BB_ERRATA_1 by doing a
0072      * dummy read back of %tick after writing it.
0073      */
0074     __asm__ __volatile__(
0075     "   ba,pt   %%xcc, 1f\n"
0076     "    nop\n"
0077     "   .align  64\n"
0078     "1: rd  %%tick, %%g2\n"
0079     "   add %%g2, 6, %%g2\n"
0080     "   andn    %%g2, %0, %%g2\n"
0081     "   wrpr    %%g2, 0, %%tick\n"
0082     "   rdpr    %%tick, %%g0"
0083     : /* no outputs */
0084     : "r" (TICK_PRIV_BIT)
0085     : "g2");
0086 }
0087 
0088 static void tick_disable_irq(void)
0089 {
0090     __asm__ __volatile__(
0091     "   ba,pt   %%xcc, 1f\n"
0092     "    nop\n"
0093     "   .align  64\n"
0094     "1: wr  %0, 0x0, %%tick_cmpr\n"
0095     "   rd  %%tick_cmpr, %%g0"
0096     : /* no outputs */
0097     : "r" (TICKCMP_IRQ_BIT));
0098 }
0099 
0100 static void tick_init_tick(void)
0101 {
0102     tick_disable_protection();
0103     tick_disable_irq();
0104 }
0105 
0106 static unsigned long long tick_get_tick(void)
0107 {
0108     unsigned long ret;
0109 
0110     __asm__ __volatile__("rd    %%tick, %0\n\t"
0111                  "mov   %0, %0"
0112                  : "=r" (ret));
0113 
0114     return ret & ~TICK_PRIV_BIT;
0115 }
0116 
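/* tick_add_compare() arms %tick_cmpr to fire 'adj' ticks from now and
 * returns nonzero if the new compare value is already behind %tick on
 * re-read, i.e. the event may have been missed; sparc64_next_event()
 * maps that to -ETIME so the clockevents core can recover.
 */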
0117 static int tick_add_compare(unsigned long adj)
0118 {
0119     unsigned long orig_tick, new_tick, new_compare;
0120 
0121     __asm__ __volatile__("rd    %%tick, %0"
0122                  : "=r" (orig_tick));
0123 
0124     orig_tick &= ~TICKCMP_IRQ_BIT;
0125 
0126     /* Workaround for Spitfire Erratum #54 (I think?); I discovered
0127      * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
0128      * number 103640.
0129      *
0130      * On Blackbird, writes to %tick_cmpr can fail; the
0131      * workaround seems to be to execute the wr instruction
0132      * at the start of an I-cache line and to perform a dummy
0133      * read back from %tick_cmpr right after writing to it. -DaveM
0134      */
0135     __asm__ __volatile__("ba,pt %%xcc, 1f\n\t"
0136                  " add  %1, %2, %0\n\t"
0137                  ".align    64\n"
0138                  "1:\n\t"
0139                  "wr    %0, 0, %%tick_cmpr\n\t"
0140                  "rd    %%tick_cmpr, %%g0\n\t"
0141                  : "=r" (new_compare)
0142                  : "r" (orig_tick), "r" (adj));
0143 
0144     __asm__ __volatile__("rd    %%tick, %0"
0145                  : "=r" (new_tick));
0146     new_tick &= ~TICKCMP_IRQ_BIT;
0147 
0148     return ((long)(new_tick - (orig_tick+adj))) > 0L;
0149 }
0150 
0151 static unsigned long tick_add_tick(unsigned long adj)
0152 {
0153     unsigned long new_tick;
0154 
0155     /* Need to handle the Blackbird bug here too. */
0156     __asm__ __volatile__("rd    %%tick, %0\n\t"
0157                  "add   %0, %1, %0\n\t"
0158                  "wrpr  %0, 0, %%tick\n\t"
0159                  : "=&r" (new_tick)
0160                  : "r" (adj));
0161 
0162     return new_tick;
0163 }
0164 
0165 /* Search the OpenBoot tree for the clock frequency of the cpu with the given cpuid */
0166 static unsigned long cpuid_to_freq(phandle node, int cpuid)
0167 {
0168     bool is_cpu_node = false;
0169     unsigned long freq = 0;
0170     char type[128];
0171 
0172     if (!node)
0173         return freq;
0174 
0175     if (prom_getproperty(node, "device_type", type, sizeof(type)) != -1)
0176         is_cpu_node = (strcmp(type, "cpu") == 0);
0177 
0178     /* Try upa-portid first, then cpuid, to match the cpuid; see prom_64.c */
0179     if (is_cpu_node && (prom_getint(node, "upa-portid") == cpuid ||
0180                 prom_getint(node, "cpuid") == cpuid))
0181         freq = prom_getintdefault(node, "clock-frequency", 0);
0182     if (!freq)
0183         freq = cpuid_to_freq(prom_getchild(node), cpuid);
0184     if (!freq)
0185         freq = cpuid_to_freq(prom_getsibling(node), cpuid);
0186 
0187     return freq;
0188 }
0189 
0190 static unsigned long tick_get_frequency(void)
0191 {
0192     return cpuid_to_freq(prom_root_node, hard_smp_processor_id());
0193 }
0194 
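/* tick_operations starts out as the plain %tick backend; init_tick_ops()
 * overwrites it with whichever backend time_init_early() selects, so
 * everything that dispatches through tick_ops or tick_operations ends up
 * using this single cacheline-aligned copy.
 */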
0195 static struct sparc64_tick_ops tick_operations __cacheline_aligned = {
0196     .name       =   "tick",
0197     .init_tick  =   tick_init_tick,
0198     .disable_irq    =   tick_disable_irq,
0199     .get_tick   =   tick_get_tick,
0200     .add_tick   =   tick_add_tick,
0201     .add_compare    =   tick_add_compare,
0202     .get_frequency  =   tick_get_frequency,
0203     .softint_mask   =   1UL << 0,
0204 };
0205 
0206 struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
0207 EXPORT_SYMBOL(tick_ops);
0208 
0209 static void stick_disable_irq(void)
0210 {
0211     __asm__ __volatile__(
0212     "wr %0, 0x0, %%asr25"
0213     : /* no outputs */
0214     : "r" (TICKCMP_IRQ_BIT));
0215 }
0216 
0217 static void stick_init_tick(void)
0218 {
0219     /* Writes to the %tick and %stick registers are not
0220      * allowed on sun4v.  The Hypervisor controls that
0221      * bit, per-strand.
0222      */
0223     if (tlb_type != hypervisor) {
0224         tick_disable_protection();
0225         tick_disable_irq();
0226 
0227         /* Let the user get at STICK too. */
0228         __asm__ __volatile__(
0229         "   rd  %%asr24, %%g2\n"
0230         "   andn    %%g2, %0, %%g2\n"
0231         "   wr  %%g2, 0, %%asr24"
0232         : /* no outputs */
0233         : "r" (TICK_PRIV_BIT)
0234         : "g1", "g2");
0235     }
0236 
0237     stick_disable_irq();
0238 }
0239 
0240 static unsigned long long stick_get_tick(void)
0241 {
0242     unsigned long ret;
0243 
0244     __asm__ __volatile__("rd    %%asr24, %0"
0245                  : "=r" (ret));
0246 
0247     return ret & ~TICK_PRIV_BIT;
0248 }
0249 
0250 static unsigned long stick_add_tick(unsigned long adj)
0251 {
0252     unsigned long new_tick;
0253 
0254     __asm__ __volatile__("rd    %%asr24, %0\n\t"
0255                  "add   %0, %1, %0\n\t"
0256                  "wr    %0, 0, %%asr24\n\t"
0257                  : "=&r" (new_tick)
0258                  : "r" (adj));
0259 
0260     return new_tick;
0261 }
0262 
0263 static int stick_add_compare(unsigned long adj)
0264 {
0265     unsigned long orig_tick, new_tick;
0266 
0267     __asm__ __volatile__("rd    %%asr24, %0"
0268                  : "=r" (orig_tick));
0269     orig_tick &= ~TICKCMP_IRQ_BIT;
0270 
0271     __asm__ __volatile__("wr    %0, 0, %%asr25"
0272                  : /* no outputs */
0273                  : "r" (orig_tick + adj));
0274 
0275     __asm__ __volatile__("rd    %%asr24, %0"
0276                  : "=r" (new_tick));
0277     new_tick &= ~TICKCMP_IRQ_BIT;
0278 
0279     return ((long)(new_tick - (orig_tick+adj))) > 0L;
0280 }
0281 
0282 static unsigned long stick_get_frequency(void)
0283 {
0284     return prom_getintdefault(prom_root_node, "stick-frequency", 0);
0285 }
0286 
0287 static struct sparc64_tick_ops stick_operations __read_mostly = {
0288     .name       =   "stick",
0289     .init_tick  =   stick_init_tick,
0290     .disable_irq    =   stick_disable_irq,
0291     .get_tick   =   stick_get_tick,
0292     .add_tick   =   stick_add_tick,
0293     .add_compare    =   stick_add_compare,
0294     .get_frequency  =   stick_get_frequency,
0295     .softint_mask   =   1UL << 16,
0296 };
0297 
0298 /* On Hummingbird the STICK/STICK_CMPR registers are implemented
0299  * in I/O space.  There are two 64-bit registers for each: the
0300  * first holds the low 32 bits of the value and the second holds
0301  * the high 32 bits.
0302  *
0303  * Since STICK is constantly updating, we have to access it carefully.
0304  *
0305  * The sequence we use to read is:
0306  * 1) read high
0307  * 2) read low
0308  * 3) read high again; if it rolled over, re-read both low and high.
0309  *
0310  * Writing STICK safely is also tricky:
0311  * 1) write low to zero
0312  * 2) write high
0313  * 3) write low
0314  */
0315 static unsigned long __hbird_read_stick(void)
0316 {
0317     unsigned long ret, tmp1, tmp2, tmp3;
0318     unsigned long addr = HBIRD_STICK_ADDR+8;
0319 
0320     __asm__ __volatile__("ldxa  [%1] %5, %2\n"
0321                  "1:\n\t"
0322                  "sub   %1, 0x8, %1\n\t"
0323                  "ldxa  [%1] %5, %3\n\t"
0324                  "add   %1, 0x8, %1\n\t"
0325                  "ldxa  [%1] %5, %4\n\t"
0326                  "cmp   %4, %2\n\t"
0327                  "bne,a,pn  %%xcc, 1b\n\t"
0328                  " mov  %4, %2\n\t"
0329                  "sllx  %4, 32, %4\n\t"
0330                  "or    %3, %4, %0\n\t"
0331                  : "=&r" (ret), "=&r" (addr),
0332                    "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
0333                  : "i" (ASI_PHYS_BYPASS_EC_E), "1" (addr));
0334 
0335     return ret;
0336 }
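
/* The loop above is roughly:
 *
 *	high = load64(HBIRD_STICK_ADDR + 8);
 *	do {
 *		low    = load64(HBIRD_STICK_ADDR);
 *		high2  = load64(HBIRD_STICK_ADDR + 8);
 *		rolled = (high2 != high);
 *		high   = high2;
 *	} while (rolled);
 *	return (high << 32) | low;
 *
 * where load64() stands for an ldxa through ASI_PHYS_BYPASS_EC_E.
 */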
0337 
0338 static void __hbird_write_stick(unsigned long val)
0339 {
0340     unsigned long low = (val & 0xffffffffUL);
0341     unsigned long high = (val >> 32UL);
0342     unsigned long addr = HBIRD_STICK_ADDR;
0343 
0344     __asm__ __volatile__("stxa  %%g0, [%0] %4\n\t"
0345                  "add   %0, 0x8, %0\n\t"
0346                  "stxa  %3, [%0] %4\n\t"
0347                  "sub   %0, 0x8, %0\n\t"
0348                  "stxa  %2, [%0] %4"
0349                  : "=&r" (addr)
0350                  : "0" (addr), "r" (low), "r" (high),
0351                    "i" (ASI_PHYS_BYPASS_EC_E));
0352 }
0353 
0354 static void __hbird_write_compare(unsigned long val)
0355 {
0356     unsigned long low = (val & 0xffffffffUL);
0357     unsigned long high = (val >> 32UL);
0358     unsigned long addr = HBIRD_STICKCMP_ADDR + 0x8UL;
0359 
0360     __asm__ __volatile__("stxa  %3, [%0] %4\n\t"
0361                  "sub   %0, 0x8, %0\n\t"
0362                  "stxa  %2, [%0] %4"
0363                  : "=&r" (addr)
0364                  : "0" (addr), "r" (low), "r" (high),
0365                    "i" (ASI_PHYS_BYPASS_EC_E));
0366 }
0367 
0368 static void hbtick_disable_irq(void)
0369 {
0370     __hbird_write_compare(TICKCMP_IRQ_BIT);
0371 }
0372 
0373 static void hbtick_init_tick(void)
0374 {
0375     tick_disable_protection();
0376 
0377     /* XXX This seems to be necessary to 'jumpstart' Hummingbird
0378      * XXX into actually sending STICK interrupts.  I think because
0379      * XXX of how we store %tick_cmpr in head.S this somehow resets the
0380      * XXX {TICK + STICK} interrupt mux.  -DaveM
0381      */
0382     __hbird_write_stick(__hbird_read_stick());
0383 
0384     hbtick_disable_irq();
0385 }
0386 
0387 static unsigned long long hbtick_get_tick(void)
0388 {
0389     return __hbird_read_stick() & ~TICK_PRIV_BIT;
0390 }
0391 
0392 static unsigned long hbtick_add_tick(unsigned long adj)
0393 {
0394     unsigned long val;
0395 
0396     val = __hbird_read_stick() + adj;
0397     __hbird_write_stick(val);
0398 
0399     return val;
0400 }
0401 
0402 static int hbtick_add_compare(unsigned long adj)
0403 {
0404     unsigned long val = __hbird_read_stick();
0405     unsigned long val2;
0406 
0407     val &= ~TICKCMP_IRQ_BIT;
0408     val += adj;
0409     __hbird_write_compare(val);
0410 
0411     val2 = __hbird_read_stick() & ~TICKCMP_IRQ_BIT;
0412 
0413     return ((long)(val2 - val)) > 0L;
0414 }
0415 
0416 static unsigned long hbtick_get_frequency(void)
0417 {
0418     return prom_getintdefault(prom_root_node, "stick-frequency", 0);
0419 }
0420 
0421 static struct sparc64_tick_ops hbtick_operations __read_mostly = {
0422     .name       =   "hbtick",
0423     .init_tick  =   hbtick_init_tick,
0424     .disable_irq    =   hbtick_disable_irq,
0425     .get_tick   =   hbtick_get_tick,
0426     .add_tick   =   hbtick_add_tick,
0427     .add_compare    =   hbtick_add_compare,
0428     .get_frequency  =   hbtick_get_frequency,
0429     .softint_mask   =   1UL << 0,
0430 };
0431 
0432 unsigned long cmos_regs;
0433 EXPORT_SYMBOL(cmos_regs);
0434 
0435 static struct resource rtc_cmos_resource;
0436 
0437 static struct platform_device rtc_cmos_device = {
0438     .name       = "rtc_cmos",
0439     .id     = -1,
0440     .resource   = &rtc_cmos_resource,
0441     .num_resources  = 1,
0442 };
0443 
0444 static int rtc_probe(struct platform_device *op)
0445 {
0446     struct resource *r;
0447 
0448     printk(KERN_INFO "%pOF: RTC regs at 0x%llx\n",
0449            op->dev.of_node, op->resource[0].start);
0450 
0451     /* The CMOS RTC driver only accepts IORESOURCE_IO, so cons
0452      * up a fake resource so that the probe works for all cases.
0453      * When the RTC is behind an ISA bus it will have IORESOURCE_IO
0454      * already, whereas when it's behind EBUS it will be IORESOURCE_MEM.
0455      */
0456 
0457     r = &rtc_cmos_resource;
0458     r->flags = IORESOURCE_IO;
0459     r->name = op->resource[0].name;
0460     r->start = op->resource[0].start;
0461     r->end = op->resource[0].end;
0462 
0463     cmos_regs = op->resource[0].start;
0464     return platform_device_register(&rtc_cmos_device);
0465 }
0466 
0467 static const struct of_device_id rtc_match[] = {
0468     {
0469         .name = "rtc",
0470         .compatible = "m5819",
0471     },
0472     {
0473         .name = "rtc",
0474         .compatible = "isa-m5819p",
0475     },
0476     {
0477         .name = "rtc",
0478         .compatible = "isa-m5823p",
0479     },
0480     {
0481         .name = "rtc",
0482         .compatible = "ds1287",
0483     },
0484     {},
0485 };
0486 
0487 static struct platform_driver rtc_driver = {
0488     .probe      = rtc_probe,
0489     .driver = {
0490         .name = "rtc",
0491         .of_match_table = rtc_match,
0492     },
0493 };
0494 
0495 static struct platform_device rtc_bq4802_device = {
0496     .name       = "rtc-bq4802",
0497     .id     = -1,
0498     .num_resources  = 1,
0499 };
0500 
0501 static int bq4802_probe(struct platform_device *op)
0502 {
0503 
0504     printk(KERN_INFO "%pOF: BQ4802 regs at 0x%llx\n",
0505            op->dev.of_node, op->resource[0].start);
0506 
0507     rtc_bq4802_device.resource = &op->resource[0];
0508     return platform_device_register(&rtc_bq4802_device);
0509 }
0510 
0511 static const struct of_device_id bq4802_match[] = {
0512     {
0513         .name = "rtc",
0514         .compatible = "bq4802",
0515     },
0516     {},
0517 };
0518 
0519 static struct platform_driver bq4802_driver = {
0520     .probe      = bq4802_probe,
0521     .driver = {
0522         .name = "bq4802",
0523         .of_match_table = bq4802_match,
0524     },
0525 };
0526 
0527 static unsigned char mostek_read_byte(struct device *dev, u32 ofs)
0528 {
0529     struct platform_device *pdev = to_platform_device(dev);
0530     void __iomem *regs = (void __iomem *) pdev->resource[0].start;
0531 
0532     return readb(regs + ofs);
0533 }
0534 
0535 static void mostek_write_byte(struct device *dev, u32 ofs, u8 val)
0536 {
0537     struct platform_device *pdev = to_platform_device(dev);
0538     void __iomem *regs = (void __iomem *) pdev->resource[0].start;
0539 
0540     writeb(val, regs + ofs);
0541 }
0542 
0543 static struct m48t59_plat_data m48t59_data = {
0544     .read_byte  = mostek_read_byte,
0545     .write_byte = mostek_write_byte,
0546 };
0547 
0548 static struct platform_device m48t59_rtc = {
0549     .name       = "rtc-m48t59",
0550     .id     = 0,
0551     .num_resources  = 1,
0552     .dev    = {
0553         .platform_data = &m48t59_data,
0554     },
0555 };
0556 
0557 static int mostek_probe(struct platform_device *op)
0558 {
0559     struct device_node *dp = op->dev.of_node;
0560 
0561     /* On an Enterprise system there can be multiple mostek clocks.
0562      * We should only match the one that is on the central FHC bus.
0563      */
0564     if (of_node_name_eq(dp->parent, "fhc") &&
0565         !of_node_name_eq(dp->parent->parent, "central"))
0566         return -ENODEV;
0567 
0568     printk(KERN_INFO "%pOF: Mostek regs at 0x%llx\n",
0569            dp, op->resource[0].start);
0570 
0571     m48t59_rtc.resource = &op->resource[0];
0572     return platform_device_register(&m48t59_rtc);
0573 }
0574 
0575 static const struct of_device_id mostek_match[] = {
0576     {
0577         .name = "eeprom",
0578     },
0579     {},
0580 };
0581 
0582 static struct platform_driver mostek_driver = {
0583     .probe      = mostek_probe,
0584     .driver = {
0585         .name = "mostek",
0586         .of_match_table = mostek_match,
0587     },
0588 };
0589 
0590 static struct platform_device rtc_sun4v_device = {
0591     .name       = "rtc-sun4v",
0592     .id     = -1,
0593 };
0594 
0595 static struct platform_device rtc_starfire_device = {
0596     .name       = "rtc-starfire",
0597     .id     = -1,
0598 };
0599 
0600 static int __init clock_init(void)
0601 {
0602     if (this_is_starfire)
0603         return platform_device_register(&rtc_starfire_device);
0604 
0605     if (tlb_type == hypervisor)
0606         return platform_device_register(&rtc_sun4v_device);
0607 
0608     (void) platform_driver_register(&rtc_driver);
0609     (void) platform_driver_register(&mostek_driver);
0610     (void) platform_driver_register(&bq4802_driver);
0611 
0612     return 0;
0613 }
0614 
0615 /* Must be after subsys_initcall() so that busses are probed.  Must
0616  * be before device_initcall() because things like the RTC driver
0617  * need to see the clock registers.
0618  */
0619 fs_initcall(clock_init);
0620 
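/* %ver encodes the chip: bits 63:48 hold the manufacturer ID and bits
 * 47:32 the implementation ID; manufacturer 0x17 with implementation
 * 0x13 is the UltraSPARC IIe.
 */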
0621 /* Return true if this is Hummingbird, aka Ultra-IIe */
0622 static bool is_hummingbird(void)
0623 {
0624     unsigned long ver, manuf, impl;
0625 
0626     __asm__ __volatile__ ("rdpr %%ver, %0"
0627                   : "=&r" (ver));
0628     manuf = ((ver >> 48) & 0xffff);
0629     impl = ((ver >> 32) & 0xffff);
0630 
0631     return (manuf == 0x17 && impl == 0x13);
0632 }
0633 
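/* cpu_data(cpu).clock_tick tracks the CPU clock rate, so it must be
 * rescaled when cpufreq changes the frequency.  freq_table caches the
 * original tick rate together with the frequency it was sampled at;
 * the cpufreq notifier below uses that pair as the reference for
 * cpufreq_scale().
 */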
0634 struct freq_table {
0635     unsigned long clock_tick_ref;
0636     unsigned int ref_freq;
0637 };
0638 static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0 };
0639 
0640 unsigned long sparc64_get_clock_tick(unsigned int cpu)
0641 {
0642     struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
0643 
0644     if (ft->clock_tick_ref)
0645         return ft->clock_tick_ref;
0646     return cpu_data(cpu).clock_tick;
0647 }
0648 EXPORT_SYMBOL(sparc64_get_clock_tick);
0649 
0650 #ifdef CONFIG_CPU_FREQ
0651 
0652 static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
0653                     void *data)
0654 {
0655     struct cpufreq_freqs *freq = data;
0656     unsigned int cpu;
0657     struct freq_table *ft;
0658 
0659     for_each_cpu(cpu, freq->policy->cpus) {
0660         ft = &per_cpu(sparc64_freq_table, cpu);
0661 
0662         if (!ft->ref_freq) {
0663             ft->ref_freq = freq->old;
0664             ft->clock_tick_ref = cpu_data(cpu).clock_tick;
0665         }
0666 
0667         if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
0668             (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
0669             cpu_data(cpu).clock_tick =
0670                 cpufreq_scale(ft->clock_tick_ref, ft->ref_freq,
0671                           freq->new);
0672         }
0673     }
0674 
0675     return 0;
0676 }
0677 
0678 static struct notifier_block sparc64_cpufreq_notifier_block = {
0679     .notifier_call  = sparc64_cpufreq_notifier
0680 };
0681 
0682 static int __init register_sparc64_cpufreq_notifier(void)
0683 {
0684 
0685     cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
0686                   CPUFREQ_TRANSITION_NOTIFIER);
0687     return 0;
0688 }
0689 
0690 core_initcall(register_sparc64_cpufreq_notifier);
0691 
0692 #endif /* CONFIG_CPU_FREQ */
0693 
0694 static int sparc64_next_event(unsigned long delta,
0695                   struct clock_event_device *evt)
0696 {
0697     return tick_operations.add_compare(delta) ? -ETIME : 0;
0698 }
0699 
0700 static int sparc64_timer_shutdown(struct clock_event_device *evt)
0701 {
0702     tick_operations.disable_irq();
0703     return 0;
0704 }
0705 
0706 static struct clock_event_device sparc64_clockevent = {
0707     .features       = CLOCK_EVT_FEAT_ONESHOT,
0708     .set_state_shutdown = sparc64_timer_shutdown,
0709     .set_next_event     = sparc64_next_event,
0710     .rating         = 100,
0711     .shift          = 30,
0712     .irq            = -1,
0713 };
0714 static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
0715 
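/* Timer softint entry point: acknowledge the pending softint bit for
 * the active tick source, then hand the tick to this cpu's clockevent
 * handler.
 */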
0716 void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
0717 {
0718     struct pt_regs *old_regs = set_irq_regs(regs);
0719     unsigned long tick_mask = tick_operations.softint_mask;
0720     int cpu = smp_processor_id();
0721     struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
0722 
0723     clear_softint(tick_mask);
0724 
0725     irq_enter();
0726 
0727     local_cpu_data().irq0_irqs++;
0728     kstat_incr_irq_this_cpu(0);
0729 
0730     if (unlikely(!evt->event_handler)) {
0731         printk(KERN_WARNING
0732                "Spurious SPARC64 timer interrupt on cpu %d\n", cpu);
0733     } else
0734         evt->event_handler(evt);
0735 
0736     irq_exit();
0737 
0738     set_irq_regs(old_regs);
0739 }
0740 
0741 void setup_sparc64_timer(void)
0742 {
0743     struct clock_event_device *sevt;
0744     unsigned long pstate;
0745 
0746     /* Guarantee that the following sequences execute
0747      * uninterrupted.
0748      */
0749     __asm__ __volatile__("rdpr  %%pstate, %0\n\t"
0750                  "wrpr  %0, %1, %%pstate"
0751                  : "=r" (pstate)
0752                  : "i" (PSTATE_IE));
0753 
0754     tick_operations.init_tick();
0755 
0756     /* Restore PSTATE_IE. */
0757     __asm__ __volatile__("wrpr  %0, 0x0, %%pstate"
0758                  : /* no outputs */
0759                  : "r" (pstate));
0760 
0761     sevt = this_cpu_ptr(&sparc64_events);
0762 
0763     memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
0764     sevt->cpumask = cpumask_of(smp_processor_id());
0765 
0766     clockevents_register_device(sevt);
0767 }
0768 
0769 #define SPARC64_NSEC_PER_CYC_SHIFT  10UL
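/* Ticks are converted to nanoseconds with a fixed-point multiply:
 * ns = (ticks * ticks_per_nsec_quotient) >> SPARC64_NSEC_PER_CYC_SHIFT,
 * where the quotient is computed by clocksource_hz2mult() in
 * init_tick_ops().  The boot-time tick value is folded into ops->offset
 * so that sched_clock() starts near zero.
 */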
0770 
0771 static struct clocksource clocksource_tick = {
0772     .rating     = 100,
0773     .mask       = CLOCKSOURCE_MASK(64),
0774     .flags      = CLOCK_SOURCE_IS_CONTINUOUS,
0775 };
0776 
0777 static unsigned long tb_ticks_per_usec __read_mostly;
0778 
0779 void __delay(unsigned long loops)
0780 {
0781     unsigned long bclock = get_tick();
0782 
0783     while ((get_tick() - bclock) < loops)
0784         ;
0785 }
0786 EXPORT_SYMBOL(__delay);
0787 
0788 void udelay(unsigned long usecs)
0789 {
0790     __delay(tb_ticks_per_usec * usecs);
0791 }
0792 EXPORT_SYMBOL(udelay);
0793 
0794 static u64 clocksource_tick_read(struct clocksource *cs)
0795 {
0796     return get_tick();
0797 }
0798 
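/* The __get_tick_patch section records instruction sites (the get_tick()
 * fast path) that are rewritten in-place with either a %tick or a %stick
 * read once the tick source is known.  Hummingbird is left unpatched
 * because its STICK lives in I/O space and cannot be read with a short
 * instruction sequence.
 */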
0799 static void __init get_tick_patch(void)
0800 {
0801     unsigned int *addr, *instr, i;
0802     struct get_tick_patch *p;
0803 
0804     if (tlb_type == spitfire && is_hummingbird())
0805         return;
0806 
0807     for (p = &__get_tick_patch; p < &__get_tick_patch_end; p++) {
0808         instr = (tlb_type == spitfire) ? p->tick : p->stick;
0809         addr = (unsigned int *)(unsigned long)p->addr;
0810         for (i = 0; i < GET_TICK_NINSTR; i++) {
0811             addr[i] = instr[i];
0812             /* ensure that address is modified before flush */
0813             wmb();
0814             flushi(&addr[i]);
0815         }
0816     }
0817 }
0818 
0819 static void __init init_tick_ops(struct sparc64_tick_ops *ops)
0820 {
0821     unsigned long freq, quotient, tick;
0822 
0823     freq = ops->get_frequency();
0824     quotient = clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
0825     tick = ops->get_tick();
0826 
0827     ops->offset = (tick * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT;
0828     ops->ticks_per_nsec_quotient = quotient;
0829     ops->frequency = freq;
0830     tick_operations = *ops;
0831     get_tick_patch();
0832 }
0833 
0834 void __init time_init_early(void)
0835 {
0836     if (tlb_type == spitfire) {
0837         if (is_hummingbird()) {
0838             init_tick_ops(&hbtick_operations);
0839             clocksource_tick.archdata.vclock_mode = VCLOCK_NONE;
0840         } else {
0841             init_tick_ops(&tick_operations);
0842             clocksource_tick.archdata.vclock_mode = VCLOCK_TICK;
0843         }
0844     } else {
0845         init_tick_ops(&stick_operations);
0846         clocksource_tick.archdata.vclock_mode = VCLOCK_STICK;
0847     }
0848 }
0849 
0850 void __init time_init(void)
0851 {
0852     unsigned long freq;
0853 
0854     freq = tick_operations.frequency;
0855     tb_ticks_per_usec = freq / USEC_PER_SEC;
0856 
0857     clocksource_tick.name = tick_operations.name;
0858     clocksource_tick.read = clocksource_tick_read;
0859 
0860     clocksource_register_hz(&clocksource_tick, freq);
0861     printk("clocksource: mult[%x] shift[%d]\n",
0862            clocksource_tick.mult, clocksource_tick.shift);
0863 
0864     sparc64_clockevent.name = tick_operations.name;
0865     clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
0866 
0867     sparc64_clockevent.max_delta_ns =
0868         clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent);
0869     sparc64_clockevent.max_delta_ticks = 0x7fffffffffffffffUL;
0870     sparc64_clockevent.min_delta_ns =
0871         clockevent_delta2ns(0xF, &sparc64_clockevent);
0872     sparc64_clockevent.min_delta_ticks = 0xF;
0873 
0874     printk("clockevent: mult[%x] shift[%d]\n",
0875            sparc64_clockevent.mult, sparc64_clockevent.shift);
0876 
0877     setup_sparc64_timer();
0878 }
0879 
0880 unsigned long long sched_clock(void)
0881 {
0882     unsigned long quotient = tick_operations.ticks_per_nsec_quotient;
0883     unsigned long offset = tick_operations.offset;
0884 
0885     /* Use barrier so the compiler emits the loads first and overlaps load
0886      * latency with reading tick, because reading %tick/%stick is a
0887      * post-sync instruction that will flush and restart subsequent
0888      * instructions after it commits.
0889      */
0890     barrier();
0891 
0892     return ((get_tick() * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT) - offset;
0893 }
0894 
0895 int read_current_timer(unsigned long *timer_val)
0896 {
0897     *timer_val = get_tick();
0898     return 0;
0899 }