// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MIPS idle loop and WAIT instruction support.
 *
 * Copyright (C) xxxx  the Anonymous
 * Copyright (C) 1994 - 2006 Ralf Baechle
 * Copyright (C) 2003, 2004  Maciej W. Rozycki
 * Copyright (C) 2001, 2004, 2011, 2012  MIPS Technologies, Inc.
 */
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/cpu-type.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>

/*
 * Not all MIPS CPUs have the "wait" instruction, and the implementation of
 * the "wait" feature differs between CPU families.  This pointer selects the
 * function that implements the CPU-specific wait.
 * The wait instruction stops the pipeline and greatly reduces the CPU's
 * power consumption.
 */
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);

static void __cpuidle r3081_wait(void)
{
	unsigned long cfg = read_c0_conf();

	write_c0_conf(cfg | R30XX_CONF_HALT);
	raw_local_irq_enable();
}

void __cpuidle r4k_wait(void)
{
	raw_local_irq_enable();
	__r4k_wait();
}
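
/*
 * Note: __r4k_wait() itself is hand-written assembly in
 * arch/mips/kernel/genex.S.  The need_resched test and the WAIT sit inside a
 * small "rollback" region there; if an interrupt arrives after IRQs are
 * re-enabled above but before the WAIT executes, the interrupt exit path
 * rewinds EPC to the start of that region, so the flag is re-checked instead
 * of sleeping through a pending reschedule.  The fragment below is only an
 * illustrative C approximation of that idea, not the real implementation.
 */
#if 0	/* conceptual sketch, never built */
static void __r4k_wait_sketch(void)
{
	/* start of the rollback region */
	if (!test_thread_flag(TIF_NEED_RESCHED))
		__asm__(
		"	.set	push		\n"
		"	.set	arch=r4000	\n"
		"	wait			\n"
		"	.set	pop		\n");
	/* end of the rollback region */
}
#endif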

/*
 * This variant is preferable as it allows testing need_resched and going to
 * sleep depending on the outcome atomically.  Unfortunately the "It is
 * implementation-dependent whether the pipeline restarts when a non-enabled
 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
 * using this version a gamble.
 */
void __cpuidle r4k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push		\n"
		"	.set	arch=r4000	\n"
		"	wait			\n"
		"	.set	pop		\n");
	raw_local_irq_enable();
}

/*
 * The RM7000 variant has to handle erratum 38.  The workaround is to not
 * have any pending stores when the WAIT instruction is executed.
 */
static void __cpuidle rm7k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"	.set	noat					\n"
		"	mfc0	$1, $12					\n"
		"	sync						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	wait						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	.set	pop					\n");
	raw_local_irq_enable();
}

/*
 * Au1 'wait' is only useful when the 32kHz counter is used as the timer,
 * since the core clock (and the cp0 counter) stops while it executes.  Only
 * an interrupt can wake the core, so interrupts must be enabled before
 * entering idle modes.
 */
static void __cpuidle au1k_wait(void)
{
	unsigned long c0status = read_c0_status() | 1;	/* irqs on */

	__asm__(
	"	.set	push			\n"
	"	.set	arch=r4000		\n"
	/*
	 * 0x14 is the Fill_I cache op: prefetch this function's code into
	 * the I-cache so the WAIT sequence runs without a memory fetch.
	 */
	"	cache	0x14, 0(%0)		\n"
	"	cache	0x14, 32(%0)		\n"
	"	sync				\n"
	"	mtc0	%1, $12			\n" /* wr c0status */
	"	wait				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	.set	pop			\n"
	: : "r" (au1k_wait), "r" (c0status));
}

static int __initdata nowait;

static int __init wait_disable(char *s)
{
	nowait = 1;

	return 1;
}

__setup("nowait", wait_disable);
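
/*
 * For example, booting with "nowait" on the kernel command line sets the
 * flag above, so check_wait() below bails out early, cpu_wait stays NULL,
 * and arch_cpu_idle() simply re-enables interrupts instead of executing a
 * WAIT-style instruction.
 */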

void __init check_wait(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	if (nowait) {
		printk("Wait instruction disabled.\n");
		return;
	}

	/*
	 * MIPSr6 specifies that masked interrupts should unblock an executing
	 * wait instruction, and thus that it is safe for us to use
	 * r4k_wait_irqoff. Yippee!
	 */
	if (cpu_has_mips_r6) {
		cpu_wait = r4k_wait_irqoff;
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R3081:
	case CPU_R3081E:
		cpu_wait = r3081_wait;
		break;
	case CPU_R4200:
/*	case CPU_R4300: */
	case CPU_R4600:
	case CPU_R4640:
	case CPU_R4650:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5500:
	case CPU_NEVADA:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_4KSC:
	case CPU_5KC:
	case CPU_5KE:
	case CPU_25KF:
	case CPU_PR4450:
	case CPU_BMIPS3300:
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
	case CPU_XBURST:
	case CPU_LOONGSON32:
		cpu_wait = r4k_wait;
		break;
	case CPU_LOONGSON64:
		if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
				(PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
				(c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
			cpu_wait = r4k_wait;
		break;

	case CPU_BMIPS5000:
		cpu_wait = r4k_wait_irqoff;
		break;
	case CPU_RM7000:
		cpu_wait = rm7k_wait_irqoff;
		break;

	case CPU_PROAPTIV:
	case CPU_P5600:
		/*
		 * Incoming Fast Debug Channel (FDC) data during a wait
		 * instruction causes the wait never to resume, even if an
		 * interrupt is received. Avoid using wait at all if FDC data
		 * is likely to be received.
		 */
		if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY))
			break;
		fallthrough;
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_M5150:
	case CPU_QEMU_GENERIC:
		cpu_wait = r4k_wait;
		/*
		 * Config7.WII set: a pending interrupt ends WAIT even when
		 * interrupts are masked, so the atomic irqoff variant is safe.
		 */
		if (read_c0_config7() & MIPS_CONF7_WII)
			cpu_wait = r4k_wait_irqoff;
		break;

	case CPU_74K:
		cpu_wait = r4k_wait;
		if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
			cpu_wait = r4k_wait_irqoff;
		break;

	case CPU_TX49XX:
		cpu_wait = r4k_wait_irqoff;
		break;
	case CPU_ALCHEMY:
		cpu_wait = au1k_wait;
		break;
	case CPU_20KC:
		/*
		 * WAIT on Rev1.0 has E1, E2, E3 and E16.
		 * WAIT on Rev2.0 and Rev3.0 has E16.
		 * Rev3.1 WAIT is nop, why bother
		 */
		if ((c->processor_id & 0xff) <= 0x64)
			break;

		/*
		 * Another rev is incrementing c0_count at a reduced clock
		 * rate while in WAIT mode.  So we basically have the choice
		 * between using the cp0 timer as clocksource or avoiding
		 * the WAIT instruction.  Until more details are known,
		 * disable the use of WAIT for 20Kc entirely.
		   cpu_wait = r4k_wait;
		 */
		break;
	default:
		break;
	}
}

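/*
 * The generic idle loop calls this with interrupts disabled; every path
 * here returns with them re-enabled, either via raw_local_irq_enable()
 * directly or inside the selected cpu_wait() handler above.
 */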
void arch_cpu_idle(void)
{
	if (cpu_wait)
		cpu_wait();
	else
		raw_local_irq_enable();
}

#ifdef CONFIG_CPU_IDLE

int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	arch_cpu_idle();
	return index;
}

#endif
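
/*
 * Sketch of how a platform cpuidle driver could hook the plain wait state up
 * to the helper above by pointing a state's .enter callback at
 * mips_cpuidle_wait_enter().  The driver name and latency numbers below are
 * placeholders for illustration only; asm/idle.h offers a similar ready-made
 * state via the MIPS_CPUIDLE_WAIT_STATE macro.
 */
#if 0	/* illustrative only, never built */
static struct cpuidle_driver example_wait_driver = {
	.name		= "example_wait_idle",	/* made-up name */
	.owner		= THIS_MODULE,
	.states = {
		[0] = {
			.enter			= mips_cpuidle_wait_enter,
			.exit_latency		= 1,	/* placeholder, in us */
			.target_residency	= 1,	/* placeholder, in us */
			.name			= "wait",
			.desc			= "MIPS wait",
		},
	},
	.state_count	= 1,
};
#endif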