Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * ARM-specific support for Broadcom STB S2/S3/S5 power management
0004  *
0005  * S2: clock gate CPUs and as many peripherals as possible
 * S3: power off all of the chip except the Always ON (AON) island; keep DDR in
 *     self-refresh
0008  * S5: (a.k.a. S3 cold boot) much like S3, except DDR is powered down, so we
0009  *     treat this mode like a soft power-off, with wakeup allowed from AON
0010  *
0011  * Copyright © 2014-2017 Broadcom
0012  */
0013 
0014 #define pr_fmt(fmt) "brcmstb-pm: " fmt
0015 
0016 #include <linux/bitops.h>
0017 #include <linux/compiler.h>
0018 #include <linux/delay.h>
0019 #include <linux/dma-mapping.h>
0020 #include <linux/err.h>
0021 #include <linux/init.h>
0022 #include <linux/io.h>
0023 #include <linux/ioport.h>
0024 #include <linux/kconfig.h>
0025 #include <linux/kernel.h>
0026 #include <linux/memblock.h>
0027 #include <linux/module.h>
0028 #include <linux/notifier.h>
0029 #include <linux/of.h>
0030 #include <linux/of_address.h>
0031 #include <linux/panic_notifier.h>
0032 #include <linux/platform_device.h>
0033 #include <linux/pm.h>
0034 #include <linux/printk.h>
0035 #include <linux/proc_fs.h>
0036 #include <linux/sizes.h>
0037 #include <linux/slab.h>
0038 #include <linux/sort.h>
0039 #include <linux/suspend.h>
0040 #include <linux/types.h>
0041 #include <linux/uaccess.h>
0042 #include <linux/soc/brcmstb/brcmstb.h>
0043 
0044 #include <asm/fncpy.h>
0045 #include <asm/setup.h>
0046 #include <asm/suspend.h>
0047 
0048 #include "pm.h"
0049 #include "aon_defs.h"
0050 
/* Offset of the DDR pad control register within each SHIM-PHY block */
#define SHIMPHY_DDR_PAD_CNTRL       0x8c

/* Method #0: gate the DDR PLL via the pad-control sequence bits */
#define SHIMPHY_PAD_PLL_SEQUENCE    BIT(8)
#define SHIMPHY_PAD_GATE_PLL_S3     BIT(9)

/* Method #1: S3 power-down sequence selector values */
#define PWRDWN_SEQ_NO_SEQUENCING    0
#define PWRDWN_SEQ_HOLD_CHANNEL     1
#define PWRDWN_SEQ_RESET_PLL        2
#define PWRDWN_SEQ_POWERDOWN_PLL    3

/* Position of the S3_PWRDWN_SEQ field within SHIMPHY_DDR_PAD_CNTRL */
#define SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK  0x00f00000
#define SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT 20

/* Bits in the DDR PHY standby control registers */
#define DDR_FORCE_CKE_RST_N     BIT(3)
#define DDR_PHY_RST_N           BIT(2)
#define DDR_PHY_CKE         BIT(1)

/* Sentinel: this PHY has no channel B standby control register */
#define DDR_PHY_NO_CHANNEL      0xffffffff

/* Maximum number of memory controllers this driver supports */
#define MAX_NUM_MEMC            3
0073 
/* Per memory-controller register mappings used during S2/S3/S5 entry */
struct brcmstb_memc {
    void __iomem *ddr_phy_base;     /* DDR PHY (PLL status, standby control) */
    void __iomem *ddr_shimphy_base; /* DDR SHIM-PHY (pad control) */
    void __iomem *ddr_ctrl;         /* DDR sequencer/controller (warm-boot flag) */
};
0079 
/* Driver-wide state, filled in once by brcmstb_pm_probe() */
struct brcmstb_pm_control {
    void __iomem *aon_ctrl_base;    /* Always-ON control registers */
    void __iomem *aon_sram;         /* Always-ON system data RAM */
    struct brcmstb_memc memcs[MAX_NUM_MEMC];

    void __iomem *boot_sram;        /* uncached, executable on-chip SRAM */
    size_t boot_sram_len;

    bool support_warm_boot;         /* S3 (warm boot) supported by this PHY */
    size_t pll_status_offset;       /* PLL status reg offset within DDR PHY */
    int num_memc;                   /* number of populated memcs[] entries */

    struct brcmstb_s3_params *s3_params;    /* parameters read by boot FW */
    dma_addr_t s3_params_pa;                /* bus address of s3_params */
    int s3entry_method;             /* 0 or 1; selects S3 entry sequence */
    u32 warm_boot_offset;           /* WARM_BOOT reg offset within ddr_ctrl */
    u32 phy_a_standby_ctrl_offs;    /* channel A standby ctrl offset */
    u32 phy_b_standby_ctrl_offs;    /* channel B offset or DDR_PHY_NO_CHANNEL */
    bool needs_ddr_pad;             /* whether shimphy_set() must touch pads */
    struct platform_device *pdev;
};
0101 
/* Commands accepted by the Broadcom Security Processor (BSP) mailbox */
enum bsp_initiate_command {
    BSP_CLOCK_STOP      = 0x00,
    BSP_GEN_RANDOM_KEY  = 0x4A,
    BSP_RESTORE_RANDOM_KEY  = 0x55,
    BSP_GEN_FIXED_KEY   = 0x63,
};

/* PM_INITIATE handshake bit and BSP status codes (low byte of register) */
#define PM_INITIATE     0x01
#define PM_INITIATE_SUCCESS 0x00
#define PM_INITIATE_FAIL    0xfe

static struct brcmstb_pm_control ctrl;

/* Non-static: referenced by name from the inline asm in brcmstb_pm_do_s3() */
noinline int brcmstb_pm_s3_finish(void);

/* S2 entry routine after it has been copied into boot SRAM */
static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
        void __iomem *ddr_phy_pll_status);
0119 
0120 static int brcmstb_init_sram(struct device_node *dn)
0121 {
0122     void __iomem *sram;
0123     struct resource res;
0124     int ret;
0125 
0126     ret = of_address_to_resource(dn, 0, &res);
0127     if (ret)
0128         return ret;
0129 
0130     /* Uncached, executable remapping of SRAM */
0131     sram = __arm_ioremap_exec(res.start, resource_size(&res), false);
0132     if (!sram)
0133         return -ENOMEM;
0134 
0135     ctrl.boot_sram = sram;
0136     ctrl.boot_sram_len = resource_size(&res);
0137 
0138     return 0;
0139 }
0140 
/* Device-tree match table used to locate the boot SRAM node */
static const struct of_device_id sram_dt_ids[] = {
    { .compatible = "mmio-sram" },
    { /* sentinel */ }
};
0145 
/*
 * Issue one command to the BSP through the AON_CTRL_PM_INITIATE mailbox
 * register and wait for it to complete.
 *
 * Returns 0 when the BSP reports PM_INITIATE_SUCCESS, non-zero on any
 * other status or on timeout.
 */
static int do_bsp_initiate_command(enum bsp_initiate_command cmd)
{
    void __iomem *base = ctrl.aon_ctrl_base;
    int ret;
    int timeo = 1000 * 1000; /* 1 second */

    /* Clear any stale request; the read-back flushes the posted write */
    writel_relaxed(0, base + AON_CTRL_PM_INITIATE);
    (void)readl_relaxed(base + AON_CTRL_PM_INITIATE);

    /* Go! */
    writel_relaxed((cmd << 1) | PM_INITIATE, base + AON_CTRL_PM_INITIATE);

    /*
     * If firmware doesn't support the 'ack', then just assume it's done
     * after 10ms. Note that this only works for command 0, BSP_CLOCK_STOP
     */
    if (of_machine_is_compatible("brcm,bcm74371a0")) {
        (void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
        mdelay(10);
        return 0;
    }

    /* Poll until the BSP clears the PM_INITIATE handshake bit */
    for (;;) {
        ret = readl_relaxed(base + AON_CTRL_PM_INITIATE);
        if (!(ret & PM_INITIATE))
            break;
        if (timeo <= 0) {
            pr_err("error: timeout waiting for BSP (%x)\n", ret);
            break;
        }
        timeo -= 50;
        udelay(50);
    }

    /* Low byte of the register carries the BSP status code */
    return (ret & 0xff) != PM_INITIATE_SUCCESS;
}
0182 
/*
 * Perform the v1 BSP power handshake: drop bit 0 of HOST_MISC_CMDS, then
 * ask the BSP to stop clocks. Must be called before any PMSM power-down.
 *
 * Returns 0 on success, non-zero if the BSP rejected or timed out.
 */
static int brcmstb_pm_handshake(void)
{
    void __iomem *base = ctrl.aon_ctrl_base;
    u32 tmp;
    int ret;

    /* BSP power handshake, v1 */
    tmp = readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
    tmp &= ~1UL;
    writel_relaxed(tmp, base + AON_CTRL_HOST_MISC_CMDS);
    (void)readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);

    ret = do_bsp_initiate_command(BSP_CLOCK_STOP);
    if (ret)
        pr_err("BSP handshake failed\n");

    /*
     * HACK: BSP may have internal race on the CLOCK_STOP command.
     * Avoid touching the BSP for a few milliseconds.
     */
    mdelay(3);

    return ret;
}
0207 
0208 static inline void shimphy_set(u32 value, u32 mask)
0209 {
0210     int i;
0211 
0212     if (!ctrl.needs_ddr_pad)
0213         return;
0214 
0215     for (i = 0; i < ctrl.num_memc; i++) {
0216         u32 tmp;
0217 
0218         tmp = readl_relaxed(ctrl.memcs[i].ddr_shimphy_base +
0219             SHIMPHY_DDR_PAD_CNTRL);
0220         tmp = value | (tmp & mask);
0221         writel_relaxed(tmp, ctrl.memcs[i].ddr_shimphy_base +
0222             SHIMPHY_DDR_PAD_CNTRL);
0223     }
0224     wmb(); /* Complete sequence in order. */
0225 }
0226 
0227 static inline void ddr_ctrl_set(bool warmboot)
0228 {
0229     int i;
0230 
0231     for (i = 0; i < ctrl.num_memc; i++) {
0232         u32 tmp;
0233 
0234         tmp = readl_relaxed(ctrl.memcs[i].ddr_ctrl +
0235                 ctrl.warm_boot_offset);
0236         if (warmboot)
0237             tmp |= 1;
0238         else
0239             tmp &= ~1; /* Cold boot */
0240         writel_relaxed(tmp, ctrl.memcs[i].ddr_ctrl +
0241                 ctrl.warm_boot_offset);
0242     }
0243     /* Complete sequence in order */
0244     wmb();
0245 }
0246 
/* S3 entry, method #0: gate the DDR PLL through the SHIM-PHY pad control */
static inline void s3entry_method0(void)
{
    shimphy_set(SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE,
            0xffffffff);
}
0252 
/* S3 entry, method #1: program the power-down sequence, then set WARM_BOOT */
static inline void s3entry_method1(void)
{
    /*
     * S3 Entry Sequence
     * -----------------
     * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
     * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 1
     */
    shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
            SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
            ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);

    ddr_ctrl_set(true);
}
0267 
0268 static inline void s5entry_method1(void)
0269 {
0270     int i;
0271 
0272     /*
0273      * S5 Entry Sequence
0274      * -----------------
0275      * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
0276      * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 0
0277      * Step 3: DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ CKE ] = 0
0278      *     DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ RST_N ] = 0
0279      */
0280     shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
0281             SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
0282             ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
0283 
0284     ddr_ctrl_set(false);
0285 
0286     for (i = 0; i < ctrl.num_memc; i++) {
0287         u32 tmp;
0288 
0289         /* Step 3: Channel A (RST_N = CKE = 0) */
0290         tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
0291                   ctrl.phy_a_standby_ctrl_offs);
0292         tmp &= ~(DDR_PHY_RST_N | DDR_PHY_RST_N);
0293         writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
0294                  ctrl.phy_a_standby_ctrl_offs);
0295 
0296         /* Step 3: Channel B? */
0297         if (ctrl.phy_b_standby_ctrl_offs != DDR_PHY_NO_CHANNEL) {
0298             tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
0299                       ctrl.phy_b_standby_ctrl_offs);
0300             tmp &= ~(DDR_PHY_RST_N | DDR_PHY_RST_N);
0301             writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
0302                      ctrl.phy_b_standby_ctrl_offs);
0303         }
0304     }
0305     /* Must complete */
0306     wmb();
0307 }
0308 
/*
 * Run a Power Management State Machine (PMSM) shutdown command and put the CPU
 * into a low-power mode
 *
 * @base_cmd: PM_CTRL configuration word (e.g. PM_COLD_CONFIG)
 * @onewrite: true when the PMSM takes the command in a single register
 *            write (s3entry_method 1); false for the two-step
 *            write + PM_PWR_DOWN sequence.
 */
static void brcmstb_do_pmsm_power_down(unsigned long base_cmd, bool onewrite)
{
    void __iomem *base = ctrl.aon_ctrl_base;

    /* Method-1 cold boot needs the DDR PHY shut down first */
    if ((ctrl.s3entry_method == 1) && (base_cmd == PM_COLD_CONFIG))
        s5entry_method1();

    /* pm_start_pwrdn transition 0->1 */
    writel_relaxed(base_cmd, base + AON_CTRL_PM_CTRL);

    if (!onewrite) {
        /* Read-backs flush each posted write before the next step */
        (void)readl_relaxed(base + AON_CTRL_PM_CTRL);

        writel_relaxed(base_cmd | PM_PWR_DOWN, base + AON_CTRL_PM_CTRL);
        (void)readl_relaxed(base + AON_CTRL_PM_CTRL);
    }
    /* Wait here; power-down proceeds while the CPU is in wfi */
    wfi();
}
0331 
/* Support S5 cold boot out of "poweroff"; installed as pm_power_off */
static void brcmstb_pm_poweroff(void)
{
    brcmstb_pm_handshake();

    /* Clear magic S3 warm-boot value */
    writel_relaxed(0, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
    (void)readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

    /* Skip wait-for-interrupt signal; just use a countdown */
    writel_relaxed(0x10, ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
    (void)readl_relaxed(ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);

    if (ctrl.s3entry_method == 1) {
        /* Same PHY shutdown as s5entry_method1(), then one-shot command */
        shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
                 SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
                 ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
        ddr_ctrl_set(false);
        brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG, true);
        return; /* We should never actually get here */
    }

    brcmstb_do_pmsm_power_down(PM_COLD_CONFIG, false);
}
0356 
0357 static void *brcmstb_pm_copy_to_sram(void *fn, size_t len)
0358 {
0359     unsigned int size = ALIGN(len, FNCPY_ALIGN);
0360 
0361     if (ctrl.boot_sram_len < size) {
0362         pr_err("standby code will not fit in SRAM\n");
0363         return NULL;
0364     }
0365 
0366     return fncpy(ctrl.boot_sram, fn, size);
0367 }
0368 
/*
 * S2 suspend/resume picks up where we left off, so we must execute carefully
 * from SRAM, in order to allow DDR to come back up safely before we continue.
 *
 * Returns the result of the SRAM-resident S2 routine, or -EINVAL if the
 * routine could not be copied into SRAM.
 */
static int brcmstb_pm_s2(void)
{
    /* A previous S3 can set a value hazardous to S2, so make sure. */
    if (ctrl.s3entry_method == 1) {
        shimphy_set((PWRDWN_SEQ_NO_SEQUENCING <<
                SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
                ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
        ddr_ctrl_set(false);
    }

    /* Re-copy on every entry; SRAM contents may not persist across states */
    brcmstb_pm_do_s2_sram = brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2,
            brcmstb_pm_do_s2_sz);
    if (!brcmstb_pm_do_s2_sram)
        return -EINVAL;

    /* Only MEMC 0's PLL status is polled by the SRAM routine */
    return brcmstb_pm_do_s2_sram(ctrl.aon_ctrl_base,
            ctrl.memcs[0].ddr_phy_base +
            ctrl.pll_status_offset);
}
0392 
/*
 * This function is called on a new stack, so don't allow inlining (which will
 * generate stack references on the old stack). It cannot be made static because
 * it is referenced from brcmstb_pm_s3()
 *
 * Prepares the S3 parameter block and AON SRAM warm-boot markers, then runs
 * the PMSM power-down. Returns -EIO on BSP key-load failure, -EINVAL for an
 * unknown s3entry_method, -EINTR if the wfi() unexpectedly returns.
 */
noinline int brcmstb_pm_s3_finish(void)
{
    struct brcmstb_s3_params *params = ctrl.s3_params;
    dma_addr_t params_pa = ctrl.s3_params_pa;
    phys_addr_t reentry = virt_to_phys(&cpu_resume_arm);
    enum bsp_initiate_command cmd;
    u32 flags;

    /*
     * Clear parameter structure, but not DTU area, which has already been
     * filled in. We know DTU is at the end, so we can just subtract its
     * size.
     */
    memset(params, 0, sizeof(*params) - sizeof(params->dtu));

    flags = readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

    /* Keep only the bootloader-owned bits, then request our S3 options */
    flags &= S3_BOOTLOADER_RESERVED;
    flags |= S3_FLAG_NO_MEM_VERIFY;
    flags |= S3_FLAG_LOAD_RANDKEY;

    /*
     * Load random / fixed key.
     * NOTE(review): S3_FLAG_LOAD_RANDKEY is set unconditionally just above,
     * so the fixed-key branch is currently unreachable.
     */
    if (flags & S3_FLAG_LOAD_RANDKEY)
        cmd = BSP_GEN_RANDOM_KEY;
    else
        cmd = BSP_GEN_FIXED_KEY;
    if (do_bsp_initiate_command(cmd)) {
        pr_info("key loading failed\n");
        return -EIO;
    }

    params->magic = BRCMSTB_S3_MAGIC;
    params->reentry = reentry;

    /* No more writes to DRAM */
    flush_cache_all();

    flags |= BRCMSTB_S3_MAGIC_SHORT;

    /* Publish warm-boot flags and parameter block address in AON SRAM */
    writel_relaxed(flags, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
    writel_relaxed(lower_32_bits(params_pa),
               ctrl.aon_sram + AON_REG_CONTROL_LOW);
    writel_relaxed(upper_32_bits(params_pa),
               ctrl.aon_sram + AON_REG_CONTROL_HIGH);

    switch (ctrl.s3entry_method) {
    case 0:
        s3entry_method0();
        brcmstb_do_pmsm_power_down(PM_WARM_CONFIG, false);
        break;
    case 1:
        s3entry_method1();
        brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG, true);
        break;
    default:
        return -EINVAL;
    }

    /* Must have been interrupted from wfi()? */
    return -EINTR;
}
0459 
/*
 * cpu_suspend() callback: switch to the SRAM stack at @sp, run
 * brcmstb_pm_s3_finish() there, and restore the original stack.
 *
 * NOTE(review): the asm performs a "bl" but declares no clobbers for
 * r0-r3/lr or "memory" — presumably acceptable in this cpu_suspend
 * context, but worth confirming against the AAPCS call-clobber rules.
 */
static int brcmstb_pm_do_s3(unsigned long sp)
{
    unsigned long save_sp;
    int ret;

    asm volatile (
        "mov    %[save], sp\n"
        "mov    sp, %[new]\n"
        "bl brcmstb_pm_s3_finish\n"
        "mov    %[ret], r0\n"
        "mov    %[new], sp\n"
        "mov    sp, %[save]\n"
        : [save] "=&r" (save_sp), [ret] "=&r" (ret)
        : [new] "r" (sp)
    );

    return ret;
}
0478 
0479 static int brcmstb_pm_s3(void)
0480 {
0481     void __iomem *sp = ctrl.boot_sram + ctrl.boot_sram_len;
0482 
0483     return cpu_suspend((unsigned long)sp, brcmstb_pm_do_s3);
0484 }
0485 
0486 static int brcmstb_pm_standby(bool deep_standby)
0487 {
0488     int ret;
0489 
0490     if (brcmstb_pm_handshake())
0491         return -EIO;
0492 
0493     if (deep_standby)
0494         ret = brcmstb_pm_s3();
0495     else
0496         ret = brcmstb_pm_s2();
0497     if (ret)
0498         pr_err("%s: standby failed\n", __func__);
0499 
0500     return ret;
0501 }
0502 
0503 static int brcmstb_pm_enter(suspend_state_t state)
0504 {
0505     int ret = -EINVAL;
0506 
0507     switch (state) {
0508     case PM_SUSPEND_STANDBY:
0509         ret = brcmstb_pm_standby(false);
0510         break;
0511     case PM_SUSPEND_MEM:
0512         ret = brcmstb_pm_standby(true);
0513         break;
0514     }
0515 
0516     return ret;
0517 }
0518 
0519 static int brcmstb_pm_valid(suspend_state_t state)
0520 {
0521     switch (state) {
0522     case PM_SUSPEND_STANDBY:
0523         return true;
0524     case PM_SUSPEND_MEM:
0525         return ctrl.support_warm_boot;
0526     default:
0527         return false;
0528     }
0529 }
0530 
/* Suspend callbacks registered via suspend_set_ops() in probe */
static const struct platform_suspend_ops brcmstb_pm_ops = {
    .enter      = brcmstb_pm_enter,
    .valid      = brcmstb_pm_valid,
};

/* AON controller node; reg 0 = control regs, reg 1 = AON SRAM (optional) */
static const struct of_device_id aon_ctrl_dt_ids[] = {
    { .compatible = "brcm,brcmstb-aon-ctrl" },
    {}
};
0540 
/* Per-DDR-PHY-version parameters, attached as of_device_id match data */
struct ddr_phy_ofdata {
    bool supports_warm_boot;        /* S3 available on this PHY */
    size_t pll_status_offset;       /* PLL status reg offset in PHY block */
    int s3entry_method;             /* 0 or 1; selects the S3 sequence */
    u32 warm_boot_offset;           /* default WARM_BOOT offset in ddr_ctrl */
    u32 phy_a_standby_ctrl_offs;    /* channel A standby control offset */
    u32 phy_b_standby_ctrl_offs;    /* channel B offset or DDR_PHY_NO_CHANNEL */
};
0549 
/* DDR PHY v71.1: method 1, single channel */
static struct ddr_phy_ofdata ddr_phy_71_1 = {
    .supports_warm_boot = true,
    .pll_status_offset = 0x0c,
    .s3entry_method = 1,
    .warm_boot_offset = 0x2c,
    .phy_a_standby_ctrl_offs = 0x198,
    .phy_b_standby_ctrl_offs = DDR_PHY_NO_CHANNEL
};

/* DDR PHY v72.0: method 1, dual channel */
static struct ddr_phy_ofdata ddr_phy_72_0 = {
    .supports_warm_boot = true,
    .pll_status_offset = 0x10,
    .s3entry_method = 1,
    .warm_boot_offset = 0x40,
    .phy_a_standby_ctrl_offs = 0x2a4,
    .phy_b_standby_ctrl_offs = 0x8a4
};

/* DDR PHY v225.1: method 0, no warm boot (S2-only) */
static struct ddr_phy_ofdata ddr_phy_225_1 = {
    .supports_warm_boot = false,
    .pll_status_offset = 0x4,
    .s3entry_method = 0
};

/* DDR PHY v240.1 (and v240.2): method 0 with warm boot */
static struct ddr_phy_ofdata ddr_phy_240_1 = {
    .supports_warm_boot = true,
    .pll_status_offset = 0x4,
    .s3entry_method = 0
};
0579 
/* Supported DDR PHY versions, mapped to their per-version parameters */
static const struct of_device_id ddr_phy_dt_ids[] = {
    {
        .compatible = "brcm,brcmstb-ddr-phy-v71.1",
        .data = &ddr_phy_71_1,
    },
    {
        .compatible = "brcm,brcmstb-ddr-phy-v72.0",
        .data = &ddr_phy_72_0,
    },
    {
        .compatible = "brcm,brcmstb-ddr-phy-v225.1",
        .data = &ddr_phy_225_1,
    },
    {
        .compatible = "brcm,brcmstb-ddr-phy-v240.1",
        .data = &ddr_phy_240_1,
    },
    {
        /* Same as v240.1, for the registers we care about */
        .compatible = "brcm,brcmstb-ddr-phy-v240.2",
        .data = &ddr_phy_240_1,
    },
    {}
};
0604 
/* Per-DDR-sequencer parameters, attached as of_device_id match data */
struct ddr_seq_ofdata {
    bool needs_ddr_pad;     /* whether shimphy_set() must program the pads */
    u32 warm_boot_offset;   /* if non-zero, overrides the PHY's default */
};

/* rev b.2.2 and later: no pad programming, fixed warm-boot offset */
static const struct ddr_seq_ofdata ddr_seq_b22 = {
    .needs_ddr_pad = false,
    .warm_boot_offset = 0x2c,
};

/* Older sequencers: pad programming required, PHY-provided offset */
static const struct ddr_seq_ofdata ddr_seq = {
    .needs_ddr_pad = true,
};

/* SHIM-PHY nodes, one per memory controller */
static const struct of_device_id ddr_shimphy_dt_ids[] = {
    { .compatible = "brcm,brcmstb-ddr-shimphy-v1.0" },
    {}
};
0623 
/* DDR sequencer revisions, mapped to their pad/warm-boot requirements */
static const struct of_device_id brcmstb_memc_of_match[] = {
    {
        .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.1",
        .data = &ddr_seq,
    },
    {
        .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
        .data = &ddr_seq_b22,
    },
    {
        .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.3",
        .data = &ddr_seq_b22,
    },
    {
        .compatible = "brcm,brcmstb-memc-ddr-rev-b.3.0",
        .data = &ddr_seq_b22,
    },
    {
        .compatible = "brcm,brcmstb-memc-ddr-rev-b.3.1",
        .data = &ddr_seq_b22,
    },
    {
        .compatible = "brcm,brcmstb-memc-ddr",
        .data = &ddr_seq,
    },
    {},
};
0651 
/*
 * Find the first DT node matching @matches and map its reg entry @index.
 * If @ofdata is non-NULL, the match's .data pointer is returned through it.
 *
 * Returns the mapping or an ERR_PTR.
 *
 * NOTE(review): the reference taken on @dn by
 * of_find_matching_node_and_match() is never dropped. An of_node_put()
 * here would be unsafe as-is because dn->full_name is handed to
 * of_io_request_and_map() as the (long-lived) resource name — confirm
 * lifetime requirements before changing this.
 */
static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
                       int index, const void **ofdata)
{
    struct device_node *dn;
    const struct of_device_id *match;

    dn = of_find_matching_node_and_match(NULL, matches, &match);
    if (!dn)
        return ERR_PTR(-EINVAL);

    if (ofdata)
        *ofdata = match->data;

    return of_io_request_and_map(dn, index, dn->full_name);
}
0667 
/*
 * Panic notifier: stamp a magic value into AON SRAM so the bootloader can
 * detect that the previous run ended in a kernel panic.
 */
static int brcmstb_pm_panic_notify(struct notifier_block *nb,
        unsigned long action, void *data)
{
    writel_relaxed(BRCMSTB_PANIC_MAGIC, ctrl.aon_sram + AON_REG_PANIC);

    return NOTIFY_DONE;
}

static struct notifier_block brcmstb_pm_panic_nb = {
    .notifier_call = brcmstb_pm_panic_notify,
};
0679 
/*
 * Probe: map the AON control/SRAM, DDR PHY, SHIM-PHY and sequencer
 * register blocks, set up boot SRAM and the S3 parameter buffer, then
 * register poweroff, panic and suspend hooks.
 *
 * The error labels unwind in strict reverse order of acquisition; 'i'
 * and 's' track how many sequencer mappings / whether a separate AON
 * SRAM mapping must be undone.
 */
static int brcmstb_pm_probe(struct platform_device *pdev)
{
    const struct ddr_phy_ofdata *ddr_phy_data;
    const struct ddr_seq_ofdata *ddr_seq_data;
    const struct of_device_id *of_id = NULL;
    struct device_node *dn;
    void __iomem *base;
    int ret, i, s;

    /* AON ctrl registers */
    base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
    if (IS_ERR(base)) {
        pr_err("error mapping AON_CTRL\n");
        ret = PTR_ERR(base);
        goto aon_err;
    }
    ctrl.aon_ctrl_base = base;

    /* AON SRAM registers; a second reg entry is optional */
    base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
    if (IS_ERR(base)) {
        /* Assume standard offset */
        ctrl.aon_sram = ctrl.aon_ctrl_base +
                     AON_CTRL_SYSTEM_DATA_RAM_OFS;
        s = 0;  /* no separate mapping to undo on error */
    } else {
        ctrl.aon_sram = base;
        s = 1;
    }

    /* Clear any stale panic marker from a previous run */
    writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);

    /* DDR PHY registers */
    base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0,
                     (const void **)&ddr_phy_data);
    if (IS_ERR(base)) {
        pr_err("error mapping DDR PHY\n");
        ret = PTR_ERR(base);
        goto ddr_phy_err;
    }
    ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
    ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
    /* Only need DDR PHY 0 for now? */
    ctrl.memcs[0].ddr_phy_base = base;
    ctrl.s3entry_method = ddr_phy_data->s3entry_method;
    ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
    ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
    /*
     * Slightly gross to use the phy ver to get a memc,
     * offset but that is the only versioned things so far
     * we can test for.
     */
    ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset;

    /* DDR SHIM-PHY registers, one node per MEMC */
    for_each_matching_node(dn, ddr_shimphy_dt_ids) {
        i = ctrl.num_memc;
        if (i >= MAX_NUM_MEMC) {
            of_node_put(dn);
            pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
            break;
        }

        base = of_io_request_and_map(dn, 0, dn->full_name);
        if (IS_ERR(base)) {
            of_node_put(dn);
            /* SHIM-PHY is only required for warm boot (S3) */
            if (!ctrl.support_warm_boot)
                break;

            pr_err("error mapping DDR SHIMPHY %d\n", i);
            ret = PTR_ERR(base);
            goto ddr_shimphy_err;
        }
        ctrl.memcs[i].ddr_shimphy_base = base;
        ctrl.num_memc++;
    }

    /* Sequencer DRAM Param and Control Registers */
    i = 0;
    for_each_matching_node(dn, brcmstb_memc_of_match) {
        base = of_iomap(dn, 0);
        if (!base) {
            of_node_put(dn);
            pr_err("error mapping DDR Sequencer %d\n", i);
            ret = -ENOMEM;
            goto brcmstb_memc_err;
        }

        of_id = of_match_node(brcmstb_memc_of_match, dn);
        if (!of_id) {
            iounmap(base);
            of_node_put(dn);
            ret = -EINVAL;
            goto brcmstb_memc_err;
        }

        ddr_seq_data = of_id->data;
        ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad;
        /* Adjust warm boot offset based on the DDR sequencer */
        if (ddr_seq_data->warm_boot_offset)
            ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset;

        ctrl.memcs[i].ddr_ctrl = base;
        i++;
    }

    pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
        ctrl.support_warm_boot, ctrl.s3entry_method,
        ctrl.warm_boot_offset);

    /* Boot SRAM, used as code + stack space during S2/S3 entry */
    dn = of_find_matching_node(NULL, sram_dt_ids);
    if (!dn) {
        pr_err("SRAM not found\n");
        ret = -EINVAL;
        goto brcmstb_memc_err;
    }

    ret = brcmstb_init_sram(dn);
    of_node_put(dn);
    if (ret) {
        pr_err("error setting up SRAM for PM\n");
        goto brcmstb_memc_err;
    }

    ctrl.pdev = pdev;

    /* S3 parameter block read by the boot firmware after warm boot */
    ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
    if (!ctrl.s3_params) {
        ret = -ENOMEM;
        goto s3_params_err;
    }
    ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
                       sizeof(*ctrl.s3_params),
                       DMA_TO_DEVICE);
    if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) {
        pr_err("error mapping DMA memory\n");
        ret = -ENOMEM;
        goto out;
    }

    atomic_notifier_chain_register(&panic_notifier_list,
                       &brcmstb_pm_panic_nb);

    pm_power_off = brcmstb_pm_poweroff;
    suspend_set_ops(&brcmstb_pm_ops);

    return 0;

out:
    kfree(ctrl.s3_params);
s3_params_err:
    iounmap(ctrl.boot_sram);
brcmstb_memc_err:
    /* 'i' sequencer mappings were acquired before the failure */
    for (i--; i >= 0; i--)
        iounmap(ctrl.memcs[i].ddr_ctrl);
ddr_shimphy_err:
    for (i = 0; i < ctrl.num_memc; i++)
        iounmap(ctrl.memcs[i].ddr_shimphy_base);

    iounmap(ctrl.memcs[0].ddr_phy_base);
ddr_phy_err:
    iounmap(ctrl.aon_ctrl_base);
    if (s)
        iounmap(ctrl.aon_sram);
aon_err:
    pr_warn("PM: initialization failed with code %d\n", ret);

    return ret;
}
0849 
/* Probed once against the AON controller node; no remove support */
static struct platform_driver brcmstb_pm_driver = {
    .driver = {
        .name   = "brcmstb-pm",
        .of_match_table = aon_ctrl_dt_ids,
    },
};

static int __init brcmstb_pm_init(void)
{
    /* platform_driver_probe: bind exactly once, no deferred probing */
    return platform_driver_probe(&brcmstb_pm_driver,
                     brcmstb_pm_probe);
}
module_init(brcmstb_pm_init);