Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * EMIF driver
0004  *
0005  * Copyright (C) 2012 Texas Instruments, Inc.
0006  *
0007  * Aneesh V <aneesh@ti.com>
0008  * Santosh Shilimkar <santosh.shilimkar@ti.com>
0009  */
0010 #include <linux/err.h>
0011 #include <linux/kernel.h>
0012 #include <linux/reboot.h>
0013 #include <linux/platform_data/emif_plat.h>
0014 #include <linux/io.h>
0015 #include <linux/device.h>
0016 #include <linux/platform_device.h>
0017 #include <linux/interrupt.h>
0018 #include <linux/slab.h>
0019 #include <linux/of.h>
0020 #include <linux/debugfs.h>
0021 #include <linux/seq_file.h>
0022 #include <linux/module.h>
0023 #include <linux/list.h>
0024 #include <linux/spinlock.h>
0025 #include <linux/pm.h>
0026 
0027 #include "emif.h"
0028 #include "jedec_ddr.h"
0029 #include "of_memory.h"
0030 
/**
 * struct emif_data - Per device static data for driver's use
 * @duplicate:          Whether the DDR devices attached to this EMIF
 *              instance are exactly same as that on EMIF1. In
 *              this case we can save some memory and processing
 * @temperature_level:      Maximum temperature of LPDDR2 devices attached
 *              to this EMIF - read from MR4 register. If there
 *              are two devices attached to this EMIF, this
 *              value is the maximum of the two temperature
 *              levels.
 * @lpmode:         Low power mode currently programmed in the
 *              REG_LP_MODE field of EMIF_PWR_MGMT_CTRL
 * @node:           node in the device list
 * @irq_state:          saved interrupt flags for the emif_lock
 *              spinlock taken in interrupt context
 * @base:           base address of memory-mapped IO registers.
 * @dev:            device pointer.
 * @regs_cache:         An array of 'struct emif_regs' that stores
 *              calculated register values for different
 *              frequencies, to avoid re-calculating them on
 *              each DVFS transition.
 * @curr_regs:          The set of register values used in the last
 *              frequency change (i.e. corresponding to the
 *              frequency in effect at the moment)
 * @plat_data:          Pointer to saved platform data.
 * @debugfs_root:       dentry to the root folder for EMIF in debugfs
 * @np_ddr:         Pointer to ddr device tree node
 */
struct emif_data {
    u8              duplicate;
    u8              temperature_level;
    u8              lpmode;
    struct list_head        node;
    unsigned long           irq_state;
    void __iomem            *base;
    struct device           *dev;
    struct emif_regs        *regs_cache[EMIF_MAX_NUM_FREQUENCIES];
    struct emif_regs        *curr_regs;
    struct emif_platform_data   *plat_data;
    struct dentry           *debugfs_root;
    struct device_node      *np_ddr;
};
0069 
0070 static struct emif_data *emif1;
0071 static DEFINE_SPINLOCK(emif_lock);
0072 static unsigned long    irq_state;
0073 static LIST_HEAD(device_list);
0074 
0075 #ifdef CONFIG_DEBUG_FS
0076 static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif,
0077     struct emif_regs *regs)
0078 {
0079     u32 type = emif->plat_data->device_info->type;
0080     u32 ip_rev = emif->plat_data->ip_rev;
0081 
0082     seq_printf(s, "EMIF register cache dump for %dMHz\n",
0083         regs->freq/1000000);
0084 
0085     seq_printf(s, "ref_ctrl_shdw\t: 0x%08x\n", regs->ref_ctrl_shdw);
0086     seq_printf(s, "sdram_tim1_shdw\t: 0x%08x\n", regs->sdram_tim1_shdw);
0087     seq_printf(s, "sdram_tim2_shdw\t: 0x%08x\n", regs->sdram_tim2_shdw);
0088     seq_printf(s, "sdram_tim3_shdw\t: 0x%08x\n", regs->sdram_tim3_shdw);
0089 
0090     if (ip_rev == EMIF_4D) {
0091         seq_printf(s, "read_idle_ctrl_shdw_normal\t: 0x%08x\n",
0092             regs->read_idle_ctrl_shdw_normal);
0093         seq_printf(s, "read_idle_ctrl_shdw_volt_ramp\t: 0x%08x\n",
0094             regs->read_idle_ctrl_shdw_volt_ramp);
0095     } else if (ip_rev == EMIF_4D5) {
0096         seq_printf(s, "dll_calib_ctrl_shdw_normal\t: 0x%08x\n",
0097             regs->dll_calib_ctrl_shdw_normal);
0098         seq_printf(s, "dll_calib_ctrl_shdw_volt_ramp\t: 0x%08x\n",
0099             regs->dll_calib_ctrl_shdw_volt_ramp);
0100     }
0101 
0102     if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
0103         seq_printf(s, "ref_ctrl_shdw_derated\t: 0x%08x\n",
0104             regs->ref_ctrl_shdw_derated);
0105         seq_printf(s, "sdram_tim1_shdw_derated\t: 0x%08x\n",
0106             regs->sdram_tim1_shdw_derated);
0107         seq_printf(s, "sdram_tim3_shdw_derated\t: 0x%08x\n",
0108             regs->sdram_tim3_shdw_derated);
0109     }
0110 }
0111 
0112 static int emif_regdump_show(struct seq_file *s, void *unused)
0113 {
0114     struct emif_data    *emif   = s->private;
0115     struct emif_regs    **regs_cache;
0116     int         i;
0117 
0118     if (emif->duplicate)
0119         regs_cache = emif1->regs_cache;
0120     else
0121         regs_cache = emif->regs_cache;
0122 
0123     for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
0124         do_emif_regdump_show(s, emif, regs_cache[i]);
0125         seq_putc(s, '\n');
0126     }
0127 
0128     return 0;
0129 }
0130 
0131 DEFINE_SHOW_ATTRIBUTE(emif_regdump);
0132 
0133 static int emif_mr4_show(struct seq_file *s, void *unused)
0134 {
0135     struct emif_data *emif = s->private;
0136 
0137     seq_printf(s, "MR4=%d\n", emif->temperature_level);
0138     return 0;
0139 }
0140 
0141 DEFINE_SHOW_ATTRIBUTE(emif_mr4);
0142 
0143 static int __init_or_module emif_debugfs_init(struct emif_data *emif)
0144 {
0145     emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL);
0146     debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif,
0147                 &emif_regdump_fops);
0148     debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif,
0149                 &emif_mr4_fops);
0150     return 0;
0151 }
0152 
/* Tear down the whole per-device debugfs directory, including its files */
static void __exit emif_debugfs_exit(struct emif_data *emif)
{
    debugfs_remove_recursive(emif->debugfs_root);
    emif->debugfs_root = NULL;
}
#else
/* No-op stubs used when CONFIG_DEBUG_FS is disabled */
static inline int __init_or_module emif_debugfs_init(struct emif_data *emif)
{
    return 0;
}

static inline void __exit emif_debugfs_exit(struct emif_data *emif)
{
}
#endif
0168 
0169 /*
0170  * Get bus width used by EMIF. Note that this may be different from the
0171  * bus width of the DDR devices used. For instance two 16-bit DDR devices
0172  * may be connected to a given CS of EMIF. In this case bus width as far
0173  * as EMIF is concerned is 32, where as the DDR bus width is 16 bits.
0174  */
0175 static u32 get_emif_bus_width(struct emif_data *emif)
0176 {
0177     u32     width;
0178     void __iomem    *base = emif->base;
0179 
0180     width = (readl(base + EMIF_SDRAM_CONFIG) & NARROW_MODE_MASK)
0181             >> NARROW_MODE_SHIFT;
0182     width = width == 0 ? 32 : 16;
0183 
0184     return width;
0185 }
0186 
/*
 * set_lpmode() - program the REG_LP_MODE field of EMIF_PWR_MGMT_CTRL.
 * On EMIF_4D, power-down mode is demoted to self-refresh (with a one-time
 * warning) per erratum i743 - see the full description below.
 */
static void set_lpmode(struct emif_data *emif, u8 lpmode)
{
    u32 temp;
    void __iomem *base = emif->base;

    /*
     * Workaround for errata i743 - LPDDR2 Power-Down State is Not
     * Efficient
     *
     * i743 DESCRIPTION:
     * The EMIF supports power-down state for low power. The EMIF
     * automatically puts the SDRAM into power-down after the memory is
     * not accessed for a defined number of cycles and the
     * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set to 0x4.
     * As the EMIF supports automatic output impedance calibration, a ZQ
     * calibration long command is issued every time it exits active
     * power-down and precharge power-down modes. The EMIF waits and
     * blocks any other command during this calibration.
     * The EMIF does not allow selective disabling of ZQ calibration upon
     * exit of power-down mode. Due to very short periods of power-down
     * cycles, ZQ calibration overhead creates bandwidth issues and
     * increases overall system power consumption. On the other hand,
     * issuing ZQ calibration long commands when exiting self-refresh is
     * still required.
     *
     * WORKAROUND
     * Because there is no power consumption benefit of the power-down due
     * to the calibration and there is a performance risk, the guideline
     * is to not allow power-down state and, therefore, to not have set
     * the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field to 0x4.
     */
    if ((emif->plat_data->ip_rev == EMIF_4D) &&
        (lpmode == EMIF_LP_MODE_PWR_DN)) {
        WARN_ONCE(1,
              "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by erratum i743 switch to LP_MODE_SELF_REFRESH(2)\n");
        /* rollback LP_MODE to Self-refresh mode */
        lpmode = EMIF_LP_MODE_SELF_REFRESH;
    }

    /* Read-modify-write only the LP_MODE field of the register */
    temp = readl(base + EMIF_POWER_MANAGEMENT_CONTROL);
    temp &= ~LP_MODE_MASK;
    temp |= (lpmode << LP_MODE_SHIFT);
    writel(temp, base + EMIF_POWER_MANAGEMENT_CONTROL);
}
0231 
/*
 * do_freq_update() - perform a DDR frequency change, bracketed by the
 * i728 workaround: self-refresh LP mode is disabled on every EMIF
 * instance before the frequency change and restored afterwards. The
 * actual FREQ_UPDATE step is a TODO pending a clock-framework API.
 * Callers are expected to hold emif_lock (see handle_temp_alert()).
 */
static void do_freq_update(void)
{
    struct emif_data *emif;

    /*
     * Workaround for errata i728: Disable LPMODE during FREQ_UPDATE
     *
     * i728 DESCRIPTION:
     * The EMIF automatically puts the SDRAM into self-refresh mode
     * after the EMIF has not performed accesses during
     * EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM number of DDR clock cycles
     * and the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set
     * to 0x2. If during a small window the following three events
     * occur:
     * - The SR_TIMING counter expires
     * - And frequency change is requested
     * - And OCP access is requested
     * Then it causes instable clock on the DDR interface.
     *
     * WORKAROUND
     * To avoid the occurrence of the three events, the workaround
     * is to disable the self-refresh when requesting a frequency
     * change. Before requesting a frequency change the software must
     * program EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0. When the
     * frequency change has been done, the software can reprogram
     * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2
     */
    list_for_each_entry(emif, &device_list, node) {
        if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
            set_lpmode(emif, EMIF_LP_MODE_DISABLE);
    }

    /*
     * TODO: Do FREQ_UPDATE here when an API
     * is available for this as part of the new
     * clock framework
     */

    list_for_each_entry(emif, &device_list, node) {
        if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
            set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
    }
}
0275 
0276 /* Find addressing table entry based on the device's type and density */
0277 static const struct lpddr2_addressing *get_addressing_table(
0278     const struct ddr_device_info *device_info)
0279 {
0280     u32     index, type, density;
0281 
0282     type = device_info->type;
0283     density = device_info->density;
0284 
0285     switch (type) {
0286     case DDR_TYPE_LPDDR2_S4:
0287         index = density - 1;
0288         break;
0289     case DDR_TYPE_LPDDR2_S2:
0290         switch (density) {
0291         case DDR_DENSITY_1Gb:
0292         case DDR_DENSITY_2Gb:
0293             index = density + 3;
0294             break;
0295         default:
0296             index = density - 1;
0297         }
0298         break;
0299     default:
0300         return NULL;
0301     }
0302 
0303     return &lpddr2_jedec_addressing_table[index];
0304 }
0305 
0306 static u32 get_zq_config_reg(const struct lpddr2_addressing *addressing,
0307         bool cs1_used, bool cal_resistors_per_cs)
0308 {
0309     u32 zq = 0, val = 0;
0310 
0311     val = EMIF_ZQCS_INTERVAL_US * 1000 / addressing->tREFI_ns;
0312     zq |= val << ZQ_REFINTERVAL_SHIFT;
0313 
0314     val = DIV_ROUND_UP(T_ZQCL_DEFAULT_NS, T_ZQCS_DEFAULT_NS) - 1;
0315     zq |= val << ZQ_ZQCL_MULT_SHIFT;
0316 
0317     val = DIV_ROUND_UP(T_ZQINIT_DEFAULT_NS, T_ZQCL_DEFAULT_NS) - 1;
0318     zq |= val << ZQ_ZQINIT_MULT_SHIFT;
0319 
0320     zq |= ZQ_SFEXITEN_ENABLE << ZQ_SFEXITEN_SHIFT;
0321 
0322     if (cal_resistors_per_cs)
0323         zq |= ZQ_DUALCALEN_ENABLE << ZQ_DUALCALEN_SHIFT;
0324     else
0325         zq |= ZQ_DUALCALEN_DISABLE << ZQ_DUALCALEN_SHIFT;
0326 
0327     zq |= ZQ_CS0EN_MASK; /* CS0 is used for sure */
0328 
0329     val = cs1_used ? 1 : 0;
0330     zq |= val << ZQ_CS1EN_SHIFT;
0331 
0332     return zq;
0333 }
0334 
0335 static u32 get_temp_alert_config(const struct lpddr2_addressing *addressing,
0336         const struct emif_custom_configs *custom_configs, bool cs1_used,
0337         u32 sdram_io_width, u32 emif_bus_width)
0338 {
0339     u32 alert = 0, interval, devcnt;
0340 
0341     if (custom_configs && (custom_configs->mask &
0342                 EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL))
0343         interval = custom_configs->temp_alert_poll_interval_ms;
0344     else
0345         interval = TEMP_ALERT_POLL_INTERVAL_DEFAULT_MS;
0346 
0347     interval *= 1000000;            /* Convert to ns */
0348     interval /= addressing->tREFI_ns;   /* Convert to refresh cycles */
0349     alert |= (interval << TA_REFINTERVAL_SHIFT);
0350 
0351     /*
0352      * sdram_io_width is in 'log2(x) - 1' form. Convert emif_bus_width
0353      * also to this form and subtract to get TA_DEVCNT, which is
0354      * in log2(x) form.
0355      */
0356     emif_bus_width = __fls(emif_bus_width) - 1;
0357     devcnt = emif_bus_width - sdram_io_width;
0358     alert |= devcnt << TA_DEVCNT_SHIFT;
0359 
0360     /* DEVWDT is in 'log2(x) - 3' form */
0361     alert |= (sdram_io_width - 2) << TA_DEVWDT_SHIFT;
0362 
0363     alert |= 1 << TA_SFEXITEN_SHIFT;
0364     alert |= 1 << TA_CS0EN_SHIFT;
0365     alert |= (cs1_used ? 1 : 0) << TA_CS1EN_SHIFT;
0366 
0367     return alert;
0368 }
0369 
/*
 * get_pwr_mgmt_ctrl() - compute the EMIF_PWR_MGMT_CTRL register value for
 * a given DDR frequency. The low power mode defaults to self-refresh but
 * may be overridden by platform custom configs; the idle timeout is chosen
 * by comparing @freq against a threshold, converted to the register's
 * "log2(timeout) - 3" encoding, and placed in the timer field that
 * corresponds to the selected mode.
 */
static u32 get_pwr_mgmt_ctrl(u32 freq, struct emif_data *emif, u32 ip_rev)
{
    u32 pwr_mgmt_ctrl   = 0, timeout;
    u32 lpmode      = EMIF_LP_MODE_SELF_REFRESH;
    u32 timeout_perf    = EMIF_LP_MODE_TIMEOUT_PERFORMANCE;
    u32 timeout_pwr     = EMIF_LP_MODE_TIMEOUT_POWER;
    u32 freq_threshold  = EMIF_LP_MODE_FREQ_THRESHOLD;
    u32 mask;
    u8 shift;

    struct emif_custom_configs *cust_cfgs = emif->plat_data->custom_configs;

    if (cust_cfgs && (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE)) {
        lpmode      = cust_cfgs->lpmode;
        timeout_perf    = cust_cfgs->lpmode_timeout_performance;
        timeout_pwr = cust_cfgs->lpmode_timeout_power;
        freq_threshold  = cust_cfgs->lpmode_freq_threshold;
    }

    /* Timeout based on DDR frequency */
    timeout = freq >= freq_threshold ? timeout_perf : timeout_pwr;

    /*
     * The value to be set in register is "log2(timeout) - 3"
     * if timeout < 16 load 0 in register
     * if timeout is not a power of 2, round to next highest power of 2
     */
    if (timeout < 16) {
        timeout = 0;
    } else {
        if (timeout & (timeout - 1))
            timeout <<= 1;
        timeout = __fls(timeout) - 3;
    }

    /* Pick the shift/mask of the timer field matching the chosen mode */
    switch (lpmode) {
    case EMIF_LP_MODE_CLOCK_STOP:
        shift = CS_TIM_SHIFT;
        mask = CS_TIM_MASK;
        break;
    case EMIF_LP_MODE_SELF_REFRESH:
        /* Workaround for errata i735 */
        if (timeout < 6)
            timeout = 6;

        shift = SR_TIM_SHIFT;
        mask = SR_TIM_MASK;
        break;
    case EMIF_LP_MODE_PWR_DN:
        shift = PD_TIM_SHIFT;
        mask = PD_TIM_MASK;
        break;
    case EMIF_LP_MODE_DISABLE:
    default:
        mask = 0;
        shift = 0;
        break;
    }
    /* Round to maximum in case of overflow, BUT warn! */
    if (lpmode != EMIF_LP_MODE_DISABLE && timeout > mask >> shift) {
        pr_err("TIMEOUT Overflow - lpmode=%d perf=%d pwr=%d freq=%d\n",
               lpmode,
               timeout_perf,
               timeout_pwr,
               freq_threshold);
        WARN(1, "timeout=0x%02x greater than 0x%02x. Using max\n",
             timeout, mask >> shift);
        timeout = mask >> shift;
    }

    /* Setup required timing */
    pwr_mgmt_ctrl = (timeout << shift) & mask;
    /* setup a default mask for rest of the modes */
    pwr_mgmt_ctrl |= (SR_TIM_MASK | CS_TIM_MASK | PD_TIM_MASK) &
              ~mask;

    /* No CS_TIM in EMIF_4D5 */
    if (ip_rev == EMIF_4D5)
        pwr_mgmt_ctrl &= ~CS_TIM_MASK;

    pwr_mgmt_ctrl |= lpmode << LP_MODE_SHIFT;

    return pwr_mgmt_ctrl;
}
0454 
/*
 * Get the temperature level of the EMIF instance:
 * Reads the MR4 register of attached SDRAM parts to find out the temperature
 * level. If there are two parts attached(one on each CS), then the temperature
 * level for the EMIF instance is the higher of the two temperatures.
 * A reserved MR4 value leaves emif->temperature_level unchanged.
 */
static void get_temperature_level(struct emif_data *emif)
{
    u32     temp, temperature_level;
    void __iomem    *base;

    base = emif->base;

    /* Read mode register 4 */
    writel(DDR_MR4, base + EMIF_LPDDR2_MODE_REG_CONFIG);
    temperature_level = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
    temperature_level = (temperature_level & MR4_SDRAM_REF_RATE_MASK) >>
                MR4_SDRAM_REF_RATE_SHIFT;

    if (emif->plat_data->device_info->cs1_used) {
        /* Repeat the MR4 read for CS1, selected via CS_MASK */
        writel(DDR_MR4 | CS_MASK, base + EMIF_LPDDR2_MODE_REG_CONFIG);
        temp = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
        temp = (temp & MR4_SDRAM_REF_RATE_MASK)
                >> MR4_SDRAM_REF_RATE_SHIFT;
        temperature_level = max(temp, temperature_level);
    }

    /* treat everything less than nominal(3) in MR4 as nominal */
    if (unlikely(temperature_level < SDRAM_TEMP_NOMINAL))
        temperature_level = SDRAM_TEMP_NOMINAL;

    /* if we get reserved value in MR4 persist with the existing value */
    if (likely(temperature_level != SDRAM_TEMP_RESERVED_4))
        emif->temperature_level = temperature_level;
}
0490 
0491 /*
0492  * setup_temperature_sensitive_regs() - set the timings for temperature
0493  * sensitive registers. This happens once at initialisation time based
0494  * on the temperature at boot time and subsequently based on the temperature
0495  * alert interrupt. Temperature alert can happen when the temperature
0496  * increases or drops. So this function can have the effect of either
0497  * derating the timings or going back to nominal values.
0498  */
0499 static void setup_temperature_sensitive_regs(struct emif_data *emif,
0500         struct emif_regs *regs)
0501 {
0502     u32     tim1, tim3, ref_ctrl, type;
0503     void __iomem    *base = emif->base;
0504     u32     temperature;
0505 
0506     type = emif->plat_data->device_info->type;
0507 
0508     tim1 = regs->sdram_tim1_shdw;
0509     tim3 = regs->sdram_tim3_shdw;
0510     ref_ctrl = regs->ref_ctrl_shdw;
0511 
0512     /* No de-rating for non-lpddr2 devices */
0513     if (type != DDR_TYPE_LPDDR2_S2 && type != DDR_TYPE_LPDDR2_S4)
0514         goto out;
0515 
0516     temperature = emif->temperature_level;
0517     if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH) {
0518         ref_ctrl = regs->ref_ctrl_shdw_derated;
0519     } else if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS) {
0520         tim1 = regs->sdram_tim1_shdw_derated;
0521         tim3 = regs->sdram_tim3_shdw_derated;
0522         ref_ctrl = regs->ref_ctrl_shdw_derated;
0523     }
0524 
0525 out:
0526     writel(tim1, base + EMIF_SDRAM_TIMING_1_SHDW);
0527     writel(tim3, base + EMIF_SDRAM_TIMING_3_SHDW);
0528     writel(ref_ctrl, base + EMIF_SDRAM_REFRESH_CTRL_SHDW);
0529 }
0530 
/*
 * handle_temp_alert() - react to an MR4 temperature alert interrupt.
 *
 * Re-reads the temperature level under emif_lock. A rising temperature is
 * handled immediately (derated timings applied in hard-irq context); a
 * falling temperature - or an unsupported part reporting above nominal,
 * which is escalated to a shutdown - is deferred to the threaded handler
 * by returning IRQ_WAKE_THREAD.
 */
static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
{
    u32     old_temp_level;
    irqreturn_t ret = IRQ_HANDLED;
    struct emif_custom_configs *custom_configs;

    spin_lock_irqsave(&emif_lock, irq_state);
    old_temp_level = emif->temperature_level;
    get_temperature_level(emif);

    if (unlikely(emif->temperature_level == old_temp_level)) {
        /* No change in level - nothing to do */
        goto out;
    } else if (!emif->curr_regs) {
        dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
        goto out;
    }

    custom_configs = emif->plat_data->custom_configs;

    /*
     * IF we detect higher than "nominal rating" from DDR sensor
     * on an unsupported DDR part, shutdown system
     */
    if (custom_configs && !(custom_configs->mask &
                EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART)) {
        if (emif->temperature_level >= SDRAM_TEMP_HIGH_DERATE_REFRESH) {
            dev_err(emif->dev,
                "%s:NOT Extended temperature capable memory. Converting MR4=0x%02x as shutdown event\n",
                __func__, emif->temperature_level);
            /*
             * Temperature far too high - do kernel_power_off()
             * from thread context
             */
            emif->temperature_level = SDRAM_TEMP_VERY_HIGH_SHUTDOWN;
            ret = IRQ_WAKE_THREAD;
            goto out;
        }
    }

    if (emif->temperature_level < old_temp_level ||
        emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
        /*
         * Temperature coming down - defer handling to thread OR
         * Temperature far too high - do kernel_power_off() from
         * thread context
         */
        ret = IRQ_WAKE_THREAD;
    } else {
        /* Temperature is going up - handle immediately */
        setup_temperature_sensitive_regs(emif, emif->curr_regs);
        do_freq_update();
    }

out:
    spin_unlock_irqrestore(&emif_lock, irq_state);
    return ret;
}
0588 
/*
 * emif_interrupt_handler() - hard-irq handler for EMIF OCP interrupts.
 * Status bits are cleared by writing back the value just read, then
 * temperature alerts and access errors are dispatched. Returns
 * IRQ_WAKE_THREAD when handle_temp_alert() defers work to the thread.
 */
static irqreturn_t emif_interrupt_handler(int irq, void *dev_id)
{
    u32         interrupts;
    struct emif_data    *emif = dev_id;
    void __iomem        *base = emif->base;
    struct device       *dev = emif->dev;
    irqreturn_t     ret = IRQ_HANDLED;

    /* Save the status and clear it */
    interrupts = readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
    writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);

    /*
     * Handle temperature alert
     * Temperature alert should be same for all ports
     * So, it's enough to process it only for one of the ports
     */
    if (interrupts & TA_SYS_MASK)
        ret = handle_temp_alert(base, emif);

    if (interrupts & ERR_SYS_MASK)
        dev_err(dev, "Access error from SYS port - %x\n", interrupts);

    if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
        /* Save the status and clear it */
        interrupts = readl(base + EMIF_LL_OCP_INTERRUPT_STATUS);
        writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_STATUS);

        if (interrupts & ERR_LL_MASK)
            dev_err(dev, "Access error from LL port - %x\n",
                interrupts);
    }

    return ret;
}
0624 
/*
 * emif_threaded_isr() - threaded half of the EMIF interrupt. Powers off
 * (or, lacking that capability, restarts) the system when the temperature
 * level was escalated to shutdown; otherwise re-applies the
 * temperature-sensitive register settings under emif_lock.
 */
static irqreturn_t emif_threaded_isr(int irq, void *dev_id)
{
    struct emif_data    *emif = dev_id;

    if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
        dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");

        /* If we have Power OFF ability, use it, else try restarting */
        if (kernel_can_power_off()) {
            kernel_power_off();
        } else {
            WARN(1, "FIXME: NO pm_power_off!!! trying restart\n");
            kernel_restart("SDRAM Over-temp Emergency restart");
        }
        return IRQ_HANDLED;
    }

    spin_lock_irqsave(&emif_lock, irq_state);

    if (emif->curr_regs) {
        setup_temperature_sensitive_regs(emif, emif->curr_regs);
        do_freq_update();
    } else {
        dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
    }

    spin_unlock_irqrestore(&emif_lock, irq_state);

    return IRQ_HANDLED;
}
0655 
0656 static void clear_all_interrupts(struct emif_data *emif)
0657 {
0658     void __iomem    *base = emif->base;
0659 
0660     writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS),
0661         base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
0662     if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
0663         writel(readl(base + EMIF_LL_OCP_INTERRUPT_STATUS),
0664             base + EMIF_LL_OCP_INTERRUPT_STATUS);
0665 }
0666 
0667 static void disable_and_clear_all_interrupts(struct emif_data *emif)
0668 {
0669     void __iomem        *base = emif->base;
0670 
0671     /* Disable all interrupts */
0672     writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET),
0673         base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_CLEAR);
0674     if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
0675         writel(readl(base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET),
0676             base + EMIF_LL_OCP_INTERRUPT_ENABLE_CLEAR);
0677 
0678     /* Clear all interrupts */
0679     clear_all_interrupts(emif);
0680 }
0681 
0682 static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq)
0683 {
0684     u32     interrupts, type;
0685     void __iomem    *base = emif->base;
0686 
0687     type = emif->plat_data->device_info->type;
0688 
0689     clear_all_interrupts(emif);
0690 
0691     /* Enable interrupts for SYS interface */
0692     interrupts = EN_ERR_SYS_MASK;
0693     if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4)
0694         interrupts |= EN_TA_SYS_MASK;
0695     writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET);
0696 
0697     /* Enable interrupts for LL interface */
0698     if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
0699         /* TA need not be enabled for LL */
0700         interrupts = EN_ERR_LL_MASK;
0701         writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET);
0702     }
0703 
0704     /* setup IRQ handlers */
0705     return devm_request_threaded_irq(emif->dev, irq,
0706                     emif_interrupt_handler,
0707                     emif_threaded_isr,
0708                     0, dev_name(emif->dev),
0709                     emif);
0710 
0711 }
0712 
/*
 * emif_onetime_settings() - one-time register setup done at probe time:
 * power management control, ZQ calibration config, temperature alert
 * polling config, and - for INTELLIPHY only - the frequency-independent
 * external PHY control shadow registers.
 */
static void __init_or_module emif_onetime_settings(struct emif_data *emif)
{
    u32             pwr_mgmt_ctrl, zq, temp_alert_cfg;
    void __iomem            *base = emif->base;
    const struct lpddr2_addressing  *addressing;
    const struct ddr_device_info    *device_info;

    device_info = emif->plat_data->device_info;
    addressing = get_addressing_table(device_info);

    /*
     * Init power management settings
     * We don't know the frequency yet. Use a high frequency
     * value for a conservative timeout setting
     */
    pwr_mgmt_ctrl = get_pwr_mgmt_ctrl(1000000000, emif,
            emif->plat_data->ip_rev);
    emif->lpmode = (pwr_mgmt_ctrl & LP_MODE_MASK) >> LP_MODE_SHIFT;
    writel(pwr_mgmt_ctrl, base + EMIF_POWER_MANAGEMENT_CONTROL);

    /* Init ZQ calibration settings */
    zq = get_zq_config_reg(addressing, device_info->cs1_used,
        device_info->cal_resistors_per_cs);
    writel(zq, base + EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG);

    /* Check the initial temperature level */
    get_temperature_level(emif);
    if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN)
        dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");

    /* Init temperature polling */
    temp_alert_cfg = get_temp_alert_config(addressing,
        emif->plat_data->custom_configs, device_info->cs1_used,
        device_info->io_width, get_emif_bus_width(emif));
    writel(temp_alert_cfg, base + EMIF_TEMPERATURE_ALERT_CONFIG);

    /*
     * Program external PHY control registers that are not frequency
     * dependent
     */
    if (emif->plat_data->phy_type != EMIF_PHY_TYPE_INTELLIPHY)
        return;
    writel(EMIF_EXT_PHY_CTRL_1_VAL, base + EMIF_EXT_PHY_CTRL_1_SHDW);
    writel(EMIF_EXT_PHY_CTRL_5_VAL, base + EMIF_EXT_PHY_CTRL_5_SHDW);
    writel(EMIF_EXT_PHY_CTRL_6_VAL, base + EMIF_EXT_PHY_CTRL_6_SHDW);
    writel(EMIF_EXT_PHY_CTRL_7_VAL, base + EMIF_EXT_PHY_CTRL_7_SHDW);
    writel(EMIF_EXT_PHY_CTRL_8_VAL, base + EMIF_EXT_PHY_CTRL_8_SHDW);
    writel(EMIF_EXT_PHY_CTRL_9_VAL, base + EMIF_EXT_PHY_CTRL_9_SHDW);
    writel(EMIF_EXT_PHY_CTRL_10_VAL, base + EMIF_EXT_PHY_CTRL_10_SHDW);
    writel(EMIF_EXT_PHY_CTRL_11_VAL, base + EMIF_EXT_PHY_CTRL_11_SHDW);
    writel(EMIF_EXT_PHY_CTRL_12_VAL, base + EMIF_EXT_PHY_CTRL_12_SHDW);
    writel(EMIF_EXT_PHY_CTRL_13_VAL, base + EMIF_EXT_PHY_CTRL_13_SHDW);
    writel(EMIF_EXT_PHY_CTRL_14_VAL, base + EMIF_EXT_PHY_CTRL_14_SHDW);
    writel(EMIF_EXT_PHY_CTRL_15_VAL, base + EMIF_EXT_PHY_CTRL_15_SHDW);
    writel(EMIF_EXT_PHY_CTRL_16_VAL, base + EMIF_EXT_PHY_CTRL_16_SHDW);
    writel(EMIF_EXT_PHY_CTRL_17_VAL, base + EMIF_EXT_PHY_CTRL_17_SHDW);
    writel(EMIF_EXT_PHY_CTRL_18_VAL, base + EMIF_EXT_PHY_CTRL_18_SHDW);
    writel(EMIF_EXT_PHY_CTRL_19_VAL, base + EMIF_EXT_PHY_CTRL_19_SHDW);
    writel(EMIF_EXT_PHY_CTRL_20_VAL, base + EMIF_EXT_PHY_CTRL_20_SHDW);
    writel(EMIF_EXT_PHY_CTRL_21_VAL, base + EMIF_EXT_PHY_CTRL_21_SHDW);
    writel(EMIF_EXT_PHY_CTRL_22_VAL, base + EMIF_EXT_PHY_CTRL_22_SHDW);
    writel(EMIF_EXT_PHY_CTRL_23_VAL, base + EMIF_EXT_PHY_CTRL_23_SHDW);
    writel(EMIF_EXT_PHY_CTRL_24_VAL, base + EMIF_EXT_PHY_CTRL_24_SHDW);
}
0777 
0778 static void get_default_timings(struct emif_data *emif)
0779 {
0780     struct emif_platform_data *pd = emif->plat_data;
0781 
0782     pd->timings     = lpddr2_jedec_timings;
0783     pd->timings_arr_size    = ARRAY_SIZE(lpddr2_jedec_timings);
0784 
0785     dev_warn(emif->dev, "%s: using default timings\n", __func__);
0786 }
0787 
0788 static int is_dev_data_valid(u32 type, u32 density, u32 io_width, u32 phy_type,
0789         u32 ip_rev, struct device *dev)
0790 {
0791     int valid;
0792 
0793     valid = (type == DDR_TYPE_LPDDR2_S4 ||
0794             type == DDR_TYPE_LPDDR2_S2)
0795         && (density >= DDR_DENSITY_64Mb
0796             && density <= DDR_DENSITY_8Gb)
0797         && (io_width >= DDR_IO_WIDTH_8
0798             && io_width <= DDR_IO_WIDTH_32);
0799 
0800     /* Combinations of EMIF and PHY revisions that we support today */
0801     switch (ip_rev) {
0802     case EMIF_4D:
0803         valid = valid && (phy_type == EMIF_PHY_TYPE_ATTILAPHY);
0804         break;
0805     case EMIF_4D5:
0806         valid = valid && (phy_type == EMIF_PHY_TYPE_INTELLIPHY);
0807         break;
0808     default:
0809         valid = 0;
0810     }
0811 
0812     if (!valid)
0813         dev_err(dev, "%s: invalid DDR details\n", __func__);
0814     return valid;
0815 }
0816 
0817 static int is_custom_config_valid(struct emif_custom_configs *cust_cfgs,
0818         struct device *dev)
0819 {
0820     int valid = 1;
0821 
0822     if ((cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE) &&
0823         (cust_cfgs->lpmode != EMIF_LP_MODE_DISABLE))
0824         valid = cust_cfgs->lpmode_freq_threshold &&
0825             cust_cfgs->lpmode_timeout_performance &&
0826             cust_cfgs->lpmode_timeout_power;
0827 
0828     if (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL)
0829         valid = valid && cust_cfgs->temp_alert_poll_interval_ms;
0830 
0831     if (!valid)
0832         dev_warn(dev, "%s: invalid custom configs\n", __func__);
0833 
0834     return valid;
0835 }
0836 
0837 #if defined(CONFIG_OF)
/*
 * of_get_custom_configs() - Parse optional low-power-mode and
 * temperature-poll properties from the EMIF DT node into a freshly
 * allocated emif_custom_configs, attached to emif->plat_data on success.
 *
 * Best-effort: if neither property is present, or allocation fails, or
 * the parsed values fail validation, plat_data->custom_configs is simply
 * left unset and the driver falls back to defaults.
 */
static void __init_or_module of_get_custom_configs(struct device_node *np_emif,
        struct emif_data *emif)
{
    struct emif_custom_configs  *cust_cfgs = NULL;
    int             len;
    const __be32            *lpmode, *poll_intvl;

    /* Presence of either property triggers allocation below */
    lpmode = of_get_property(np_emif, "low-power-mode", &len);
    poll_intvl = of_get_property(np_emif, "temp-alert-poll-interval", &len);

    if (lpmode || poll_intvl)
        cust_cfgs = devm_kzalloc(emif->dev, sizeof(*cust_cfgs),
            GFP_KERNEL);

    /* Nothing to do: no custom properties, or allocation failed */
    if (!cust_cfgs)
        return;

    if (lpmode) {
        cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_LPMODE;
        cust_cfgs->lpmode = be32_to_cpup(lpmode);
        /* The three tunables are optional; missing ones stay 0 and
         * are caught by is_custom_config_valid() below. */
        of_property_read_u32(np_emif,
                "low-power-mode-timeout-performance",
                &cust_cfgs->lpmode_timeout_performance);
        of_property_read_u32(np_emif,
                "low-power-mode-timeout-power",
                &cust_cfgs->lpmode_timeout_power);
        of_property_read_u32(np_emif,
                "low-power-mode-freq-threshold",
                &cust_cfgs->lpmode_freq_threshold);
    }

    if (poll_intvl) {
        cust_cfgs->mask |=
                EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL;
        cust_cfgs->temp_alert_poll_interval_ms =
                        be32_to_cpup(poll_intvl);
    }

    /* Boolean property: device is an extended-temperature part */
    if (of_find_property(np_emif, "extended-temp-part", &len))
        cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART;

    /* Discard the allocation if the parsed values are unusable */
    if (!is_custom_config_valid(cust_cfgs, emif->dev)) {
        devm_kfree(emif->dev, cust_cfgs);
        return;
    }

    emif->plat_data->custom_configs = cust_cfgs;
}
0886 
0887 static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
0888         struct device_node *np_ddr,
0889         struct ddr_device_info *dev_info)
0890 {
0891     u32 density = 0, io_width = 0;
0892     int len;
0893 
0894     if (of_find_property(np_emif, "cs1-used", &len))
0895         dev_info->cs1_used = true;
0896 
0897     if (of_find_property(np_emif, "cal-resistor-per-cs", &len))
0898         dev_info->cal_resistors_per_cs = true;
0899 
0900     if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4"))
0901         dev_info->type = DDR_TYPE_LPDDR2_S4;
0902     else if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s2"))
0903         dev_info->type = DDR_TYPE_LPDDR2_S2;
0904 
0905     of_property_read_u32(np_ddr, "density", &density);
0906     of_property_read_u32(np_ddr, "io-width", &io_width);
0907 
0908     /* Convert from density in Mb to the density encoding in jedc_ddr.h */
0909     if (density & (density - 1))
0910         dev_info->density = 0;
0911     else
0912         dev_info->density = __fls(density) - 5;
0913 
0914     /* Convert from io_width in bits to io_width encoding in jedc_ddr.h */
0915     if (io_width & (io_width - 1))
0916         dev_info->io_width = 0;
0917     else
0918         dev_info->io_width = __fls(io_width) - 1;
0919 }
0920 
/*
 * of_get_memory_device_details() - Build the per-instance emif_data (and
 * its platform data / device info) from the EMIF device-tree node.
 *
 * The attached SDRAM is found via the "device-handle" phandle. If a
 * previously-probed EMIF1 points at the same SDRAM node, this instance is
 * marked a duplicate and timings/custom-config parsing is skipped.
 *
 * All allocations are devm-managed against @dev. Returns the new
 * emif_data, or NULL on missing phandle / allocation failure / invalid
 * device data.
 */
static struct emif_data * __init_or_module of_get_memory_device_details(
        struct device_node *np_emif, struct device *dev)
{
    struct emif_data        *emif = NULL;
    struct ddr_device_info      *dev_info = NULL;
    struct emif_platform_data   *pd = NULL;
    struct device_node      *np_ddr;
    int             len;

    np_ddr = of_parse_phandle(np_emif, "device-handle", 0);
    if (!np_ddr)
        goto error;
    emif    = devm_kzalloc(dev, sizeof(struct emif_data), GFP_KERNEL);
    pd  = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
    dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);

    if (!emif || !pd || !dev_info) {
        dev_err(dev, "%s: Out of memory!!\n",
            __func__);
        goto error;
    }

    /* Wire the three structures together */
    emif->plat_data     = pd;
    pd->device_info     = dev_info;
    emif->dev       = dev;
    emif->np_ddr        = np_ddr;
    emif->temperature_level = SDRAM_TEMP_NOMINAL;

    if (of_device_is_compatible(np_emif, "ti,emif-4d"))
        emif->plat_data->ip_rev = EMIF_4D;
    else if (of_device_is_compatible(np_emif, "ti,emif-4d5"))
        emif->plat_data->ip_rev = EMIF_4D5;

    of_property_read_u32(np_emif, "phy-type", &pd->phy_type);

    if (of_find_property(np_emif, "hw-caps-ll-interface", &len))
        pd->hw_caps |= EMIF_HW_CAPS_LL_INTERFACE;

    /* Fill dev_info from the SDRAM node, then validate the combination */
    of_get_ddr_info(np_emif, np_ddr, dev_info);
    if (!is_dev_data_valid(pd->device_info->type, pd->device_info->density,
            pd->device_info->io_width, pd->phy_type, pd->ip_rev,
            emif->dev)) {
        dev_err(dev, "%s: invalid device data!!\n", __func__);
        goto error;
    }
    /*
     * For EMIF instances other than EMIF1 see if the devices connected
     * are exactly same as on EMIF1(which is typically the case). If so,
     * mark it as a duplicate of EMIF1. This will save some memory and
     * computation.
     */
    if (emif1 && emif1->np_ddr == np_ddr) {
        emif->duplicate = true;
        goto out;
    } else if (emif1) {
        dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
            __func__);
    }

    /* Not a duplicate: parse the full timing/config data for this SDRAM */
    of_get_custom_configs(np_emif, emif);
    emif->plat_data->timings = of_get_ddr_timings(np_ddr, emif->dev,
                    emif->plat_data->device_info->type,
                    &emif->plat_data->timings_arr_size);

    emif->plat_data->min_tck = of_get_min_tck(np_ddr, emif->dev);
    goto out;

error:
    return NULL;
out:
    return emif;
}
0993 
#else

/* Stub for !CONFIG_OF builds: no device tree, so nothing to parse. */
static struct emif_data * __init_or_module of_get_memory_device_details(
        struct device_node *np_emif, struct device *dev)
{
    return NULL;
}
#endif
1002 
/*
 * get_device_details() - Build the per-instance emif_data from legacy
 * (non-DT) platform data attached to @pdev.
 *
 * The caller-supplied platform data is deep-copied into devm-managed
 * memory (platform data itself, device info, custom configs, timings and
 * min-tck) so the driver never dereferences board-file memory after
 * probe. If this EMIF drives the same DDR parts as EMIF1 it is marked a
 * duplicate and the timing copies are skipped. Missing or uncopyable
 * timings fall back to the JEDEC defaults.
 *
 * Returns the new emif_data, or NULL on invalid data / allocation failure
 * of the mandatory structures.
 */
static struct emif_data *__init_or_module get_device_details(
        struct platform_device *pdev)
{
    u32             size;
    struct emif_data        *emif = NULL;
    struct ddr_device_info      *dev_info;
    struct emif_custom_configs  *cust_cfgs;
    struct emif_platform_data   *pd;
    struct device           *dev;
    void                *temp;

    pd = pdev->dev.platform_data;
    dev = &pdev->dev;

    /* Mandatory: platform data with a valid DDR device description */
    if (!(pd && pd->device_info && is_dev_data_valid(pd->device_info->type,
            pd->device_info->density, pd->device_info->io_width,
            pd->phy_type, pd->ip_rev, dev))) {
        dev_err(dev, "%s: invalid device data\n", __func__);
        goto error;
    }

    emif    = devm_kzalloc(dev, sizeof(*emif), GFP_KERNEL);
    temp    = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
    dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);

    if (!emif || !temp || !dev_info)
        goto error;

    /* Deep-copy platform data and device info into managed memory */
    memcpy(temp, pd, sizeof(*pd));
    pd = temp;
    memcpy(dev_info, pd->device_info, sizeof(*dev_info));

    pd->device_info     = dev_info;
    emif->plat_data     = pd;
    emif->dev       = dev;
    emif->temperature_level = SDRAM_TEMP_NOMINAL;

    /*
     * For EMIF instances other than EMIF1 see if the devices connected
     * are exactly same as on EMIF1(which is typically the case). If so,
     * mark it as a duplicate of EMIF1 and skip copying timings data.
     * This will save some memory and some computation later.
     */
    emif->duplicate = emif1 && (memcmp(dev_info,
        emif1->plat_data->device_info,
        sizeof(struct ddr_device_info)) == 0);

    if (emif->duplicate) {
        pd->timings = NULL;
        pd->min_tck = NULL;
        goto out;
    } else if (emif1) {
        dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
            __func__);
    }

    /*
     * Copy custom configs - ignore allocation error, if any, as
     * custom_configs is not very critical
     */
    cust_cfgs = pd->custom_configs;
    if (cust_cfgs && is_custom_config_valid(cust_cfgs, dev)) {
        temp = devm_kzalloc(dev, sizeof(*cust_cfgs), GFP_KERNEL);
        if (temp)
            memcpy(temp, cust_cfgs, sizeof(*cust_cfgs));
        pd->custom_configs = temp;
    }

    /*
     * Copy timings and min-tck values from platform data. If it is not
     * available or if memory allocation fails, use JEDEC defaults
     */
    size = sizeof(struct lpddr2_timings) * pd->timings_arr_size;
    if (pd->timings) {
        temp = devm_kzalloc(dev, size, GFP_KERNEL);
        if (temp) {
            memcpy(temp, pd->timings, size);
            pd->timings = temp;
        } else {
            get_default_timings(emif);
        }
    } else {
        get_default_timings(emif);
    }

    if (pd->min_tck) {
        temp = devm_kzalloc(dev, sizeof(*pd->min_tck), GFP_KERNEL);
        if (temp) {
            memcpy(temp, pd->min_tck, sizeof(*pd->min_tck));
            pd->min_tck = temp;
        } else {
            pd->min_tck = &lpddr2_jedec_min_tck;
        }
    } else {
        pd->min_tck = &lpddr2_jedec_min_tck;
    }

out:
    return emif;

error:
    return NULL;
}
1106 
1107 static int __init_or_module emif_probe(struct platform_device *pdev)
1108 {
1109     struct emif_data    *emif;
1110     int         irq, ret;
1111 
1112     if (pdev->dev.of_node)
1113         emif = of_get_memory_device_details(pdev->dev.of_node, &pdev->dev);
1114     else
1115         emif = get_device_details(pdev);
1116 
1117     if (!emif) {
1118         pr_err("%s: error getting device data\n", __func__);
1119         goto error;
1120     }
1121 
1122     list_add(&emif->node, &device_list);
1123 
1124     /* Save pointers to each other in emif and device structures */
1125     emif->dev = &pdev->dev;
1126     platform_set_drvdata(pdev, emif);
1127 
1128     emif->base = devm_platform_ioremap_resource(pdev, 0);
1129     if (IS_ERR(emif->base))
1130         goto error;
1131 
1132     irq = platform_get_irq(pdev, 0);
1133     if (irq < 0)
1134         goto error;
1135 
1136     emif_onetime_settings(emif);
1137     emif_debugfs_init(emif);
1138     disable_and_clear_all_interrupts(emif);
1139     ret = setup_interrupts(emif, irq);
1140     if (ret)
1141         goto error;
1142 
1143     /* One-time actions taken on probing the first device */
1144     if (!emif1) {
1145         emif1 = emif;
1146 
1147         /*
1148          * TODO: register notifiers for frequency and voltage
1149          * change here once the respective frameworks are
1150          * available
1151          */
1152     }
1153 
1154     dev_info(&pdev->dev, "%s: device configured with addr = %p and IRQ%d\n",
1155         __func__, emif->base, irq);
1156 
1157     return 0;
1158 error:
1159     return -ENODEV;
1160 }
1161 
1162 static int __exit emif_remove(struct platform_device *pdev)
1163 {
1164     struct emif_data *emif = platform_get_drvdata(pdev);
1165 
1166     emif_debugfs_exit(emif);
1167 
1168     return 0;
1169 }
1170 
/*
 * emif_shutdown() - Quiesce the controller on system shutdown/reboot by
 * masking and acknowledging all EMIF interrupts.
 */
static void emif_shutdown(struct platform_device *pdev)
{
    disable_and_clear_all_interrupts(platform_get_drvdata(pdev));
}
1177 
#if defined(CONFIG_OF)
/* DT match table: EMIF4D (OMAP4-class) and EMIF4D5 (OMAP5-class) IPs */
static const struct of_device_id emif_of_match[] = {
        { .compatible = "ti,emif-4d" },
        { .compatible = "ti,emif-4d5" },
        {},
};
MODULE_DEVICE_TABLE(of, emif_of_match);
#endif

/*
 * Platform driver glue. The probe routine is registered separately via
 * module_platform_driver_probe() below, which allows it (and everything
 * marked __init_or_module) to be discarded after init in built-in builds.
 */
static struct platform_driver emif_driver = {
    .remove     = __exit_p(emif_remove),
    .shutdown   = emif_shutdown,
    .driver = {
        .name = "emif",
        .of_match_table = of_match_ptr(emif_of_match),
    },
};

module_platform_driver_probe(emif_driver, emif_probe);

MODULE_DESCRIPTION("TI EMIF SDRAM Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:emif");
MODULE_AUTHOR("Texas Instruments Inc");