0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/module.h>
0009 #include <linux/of_device.h>
0010 #include <linux/delay.h>
0011 #include <linux/mmc/mmc.h>
0012 #include <linux/pm_runtime.h>
0013 #include <linux/pm_opp.h>
0014 #include <linux/slab.h>
0015 #include <linux/iopoll.h>
0016 #include <linux/qcom_scm.h>
0017 #include <linux/regulator/consumer.h>
0018 #include <linux/interconnect.h>
0019 #include <linux/pinctrl/consumer.h>
0020 #include <linux/reset.h>
0021
0022 #include "sdhci-pltfm.h"
0023 #include "cqhci.h"
0024
0025 #define CORE_MCI_VERSION 0x50
0026 #define CORE_VERSION_MAJOR_SHIFT 28
0027 #define CORE_VERSION_MAJOR_MASK (0xf << CORE_VERSION_MAJOR_SHIFT)
0028 #define CORE_VERSION_MINOR_MASK 0xff
0029
0030 #define CORE_MCI_GENERICS 0x70
0031 #define SWITCHABLE_SIGNALING_VOLTAGE BIT(29)
0032
0033 #define HC_MODE_EN 0x1
0034 #define CORE_POWER 0x0
0035 #define CORE_SW_RST BIT(7)
0036 #define FF_CLK_SW_RST_DIS BIT(13)
0037
0038 #define CORE_PWRCTL_BUS_OFF BIT(0)
0039 #define CORE_PWRCTL_BUS_ON BIT(1)
0040 #define CORE_PWRCTL_IO_LOW BIT(2)
0041 #define CORE_PWRCTL_IO_HIGH BIT(3)
0042 #define CORE_PWRCTL_BUS_SUCCESS BIT(0)
0043 #define CORE_PWRCTL_BUS_FAIL BIT(1)
0044 #define CORE_PWRCTL_IO_SUCCESS BIT(2)
0045 #define CORE_PWRCTL_IO_FAIL BIT(3)
0046 #define REQ_BUS_OFF BIT(0)
0047 #define REQ_BUS_ON BIT(1)
0048 #define REQ_IO_LOW BIT(2)
0049 #define REQ_IO_HIGH BIT(3)
0050 #define INT_MASK 0xf
0051 #define MAX_PHASES 16
0052 #define CORE_DLL_LOCK BIT(7)
0053 #define CORE_DDR_DLL_LOCK BIT(11)
0054 #define CORE_DLL_EN BIT(16)
0055 #define CORE_CDR_EN BIT(17)
0056 #define CORE_CK_OUT_EN BIT(18)
0057 #define CORE_CDR_EXT_EN BIT(19)
0058 #define CORE_DLL_PDN BIT(29)
0059 #define CORE_DLL_RST BIT(30)
0060 #define CORE_CMD_DAT_TRACK_SEL BIT(0)
0061
0062 #define CORE_DDR_CAL_EN BIT(0)
0063 #define CORE_FLL_CYCLE_CNT BIT(18)
0064 #define CORE_DLL_CLOCK_DISABLE BIT(21)
0065
0066 #define DLL_USR_CTL_POR_VAL 0x10800
0067 #define ENABLE_DLL_LOCK_STATUS BIT(26)
0068 #define FINE_TUNE_MODE_EN BIT(27)
0069 #define BIAS_OK_SIGNAL BIT(29)
0070
0071 #define DLL_CONFIG_3_LOW_FREQ_VAL 0x08
0072 #define DLL_CONFIG_3_HIGH_FREQ_VAL 0x10
0073
0074 #define CORE_VENDOR_SPEC_POR_VAL 0xa9c
0075 #define CORE_CLK_PWRSAVE BIT(1)
0076 #define CORE_HC_MCLK_SEL_DFLT (2 << 8)
0077 #define CORE_HC_MCLK_SEL_HS400 (3 << 8)
0078 #define CORE_HC_MCLK_SEL_MASK (3 << 8)
0079 #define CORE_IO_PAD_PWR_SWITCH_EN BIT(15)
0080 #define CORE_IO_PAD_PWR_SWITCH BIT(16)
0081 #define CORE_HC_SELECT_IN_EN BIT(18)
0082 #define CORE_HC_SELECT_IN_HS400 (6 << 19)
0083 #define CORE_HC_SELECT_IN_MASK (7 << 19)
0084
0085 #define CORE_3_0V_SUPPORT BIT(25)
0086 #define CORE_1_8V_SUPPORT BIT(26)
0087 #define CORE_VOLT_SUPPORT (CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)
0088
0089 #define CORE_CSR_CDC_CTLR_CFG0 0x130
0090 #define CORE_SW_TRIG_FULL_CALIB BIT(16)
0091 #define CORE_HW_AUTOCAL_ENA BIT(17)
0092
0093 #define CORE_CSR_CDC_CTLR_CFG1 0x134
0094 #define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
0095 #define CORE_TIMER_ENA BIT(16)
0096
0097 #define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
0098 #define CORE_CSR_CDC_REFCOUNT_CFG 0x140
0099 #define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
0100 #define CORE_CDC_OFFSET_CFG 0x14C
0101 #define CORE_CSR_CDC_DELAY_CFG 0x150
0102 #define CORE_CDC_SLAVE_DDA_CFG 0x160
0103 #define CORE_CSR_CDC_STATUS0 0x164
0104 #define CORE_CALIBRATION_DONE BIT(0)
0105
0106 #define CORE_CDC_ERROR_CODE_MASK 0x7000000
0107
0108 #define CORE_CSR_CDC_GEN_CFG 0x178
0109 #define CORE_CDC_SWITCH_BYPASS_OFF BIT(0)
0110 #define CORE_CDC_SWITCH_RC_EN BIT(1)
0111
0112 #define CORE_CDC_T4_DLY_SEL BIT(0)
0113 #define CORE_CMDIN_RCLK_EN BIT(1)
0114 #define CORE_START_CDC_TRAFFIC BIT(6)
0115
0116 #define CORE_PWRSAVE_DLL BIT(3)
0117
0118 #define DDR_CONFIG_POR_VAL 0x80040873
0119
0120
0121 #define INVALID_TUNING_PHASE -1
0122 #define SDHCI_MSM_MIN_CLOCK 400000
0123 #define CORE_FREQ_100MHZ (100 * 1000 * 1000)
0124
0125 #define CDR_SELEXT_SHIFT 20
0126 #define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT)
0127 #define CMUX_SHIFT_PHASE_SHIFT 24
0128 #define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
0129
0130 #define MSM_MMC_AUTOSUSPEND_DELAY_MS 50
0131
0132
0133 #define MSM_PWR_IRQ_TIMEOUT_MS 5000
0134
0135
0136 #define MMC_VQMMC_MAX_LOAD_UA 325000
0137
0138 #define msm_host_readl(msm_host, host, offset) \
0139 msm_host->var_ops->msm_readl_relaxed(host, offset)
0140
0141 #define msm_host_writel(msm_host, val, host, offset) \
0142 msm_host->var_ops->msm_writel_relaxed(val, host, offset)
0143
0144
0145 #define CQHCI_VENDOR_CFG1 0xA00
0146 #define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)
0147
/*
 * Per-variant offset map for the MSM SDCC "core" registers.  The MCI
 * variant exposes these in a separate core memory region (core_mem),
 * while the V5 variant folds them into the standard SDHCI register
 * space, so the same logical register lives at different offsets —
 * see the two tables below.  Fields are byte offsets, except
 * core_testbus_sel2_bit (a bit number) and core_testbus_ena /
 * core_testbus_sel2 (single-bit masks for the testbus config register).
 */
struct sdhci_msm_offset {
	u32 core_hc_mode;
	u32 core_mci_data_cnt;
	u32 core_mci_status;
	u32 core_mci_fifo_cnt;
	u32 core_mci_version;
	u32 core_generics;
	u32 core_testbus_config;
	u32 core_testbus_sel2_bit;
	u32 core_testbus_ena;
	u32 core_testbus_sel2;
	u32 core_pwrctl_status;
	u32 core_pwrctl_mask;
	u32 core_pwrctl_clear;
	u32 core_pwrctl_ctl;
	u32 core_sdcc_debug_reg;
	u32 core_dll_config;
	u32 core_dll_status;
	u32 core_vendor_spec;
	u32 core_vendor_spec_adma_err_addr0;
	u32 core_vendor_spec_adma_err_addr1;
	u32 core_vendor_spec_func2;
	u32 core_vendor_spec_capabilities0;
	u32 core_ddr_200_cfg;
	u32 core_vendor_spec3;
	u32 core_dll_config_2;
	u32 core_dll_config_3;
	u32 core_ddr_config_old;	/* pre-v5 DDR CONFIG register (see mci table) */
	u32 core_ddr_config;
	u32 core_dll_usr_ctl;		/* only present on Tassadar-DLL controllers */
};
0179
0180 static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
0181 .core_mci_data_cnt = 0x35c,
0182 .core_mci_status = 0x324,
0183 .core_mci_fifo_cnt = 0x308,
0184 .core_mci_version = 0x318,
0185 .core_generics = 0x320,
0186 .core_testbus_config = 0x32c,
0187 .core_testbus_sel2_bit = 3,
0188 .core_testbus_ena = (1 << 31),
0189 .core_testbus_sel2 = (1 << 3),
0190 .core_pwrctl_status = 0x240,
0191 .core_pwrctl_mask = 0x244,
0192 .core_pwrctl_clear = 0x248,
0193 .core_pwrctl_ctl = 0x24c,
0194 .core_sdcc_debug_reg = 0x358,
0195 .core_dll_config = 0x200,
0196 .core_dll_status = 0x208,
0197 .core_vendor_spec = 0x20c,
0198 .core_vendor_spec_adma_err_addr0 = 0x214,
0199 .core_vendor_spec_adma_err_addr1 = 0x218,
0200 .core_vendor_spec_func2 = 0x210,
0201 .core_vendor_spec_capabilities0 = 0x21c,
0202 .core_ddr_200_cfg = 0x224,
0203 .core_vendor_spec3 = 0x250,
0204 .core_dll_config_2 = 0x254,
0205 .core_dll_config_3 = 0x258,
0206 .core_ddr_config = 0x25c,
0207 .core_dll_usr_ctl = 0x388,
0208 };
0209
/*
 * Offsets for legacy controllers with the MCI block, where the core
 * registers live in a separate memory region (msm_host->core_mem).
 * This variant has no core_dll_config_3 / core_dll_usr_ctl registers,
 * but does have the old DDR CONFIG register (core_ddr_config_old).
 */
static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
	.core_hc_mode = 0x78,
	.core_mci_data_cnt = 0x30,
	.core_mci_status = 0x34,
	.core_mci_fifo_cnt = 0x44,
	.core_mci_version = 0x050,
	.core_generics = 0x70,
	.core_testbus_config = 0x0cc,
	.core_testbus_sel2_bit = 4,
	.core_testbus_ena = (1 << 3),
	.core_testbus_sel2 = (1 << 4),
	.core_pwrctl_status = 0xdc,
	.core_pwrctl_mask = 0xe0,
	.core_pwrctl_clear = 0xe4,
	.core_pwrctl_ctl = 0xe8,
	.core_sdcc_debug_reg = 0x124,
	.core_dll_config = 0x100,
	.core_dll_status = 0x108,
	.core_vendor_spec = 0x10c,
	.core_vendor_spec_adma_err_addr0 = 0x114,
	.core_vendor_spec_adma_err_addr1 = 0x118,
	.core_vendor_spec_func2 = 0x110,
	.core_vendor_spec_capabilities0 = 0x11c,
	.core_ddr_200_cfg = 0x184,
	.core_vendor_spec3 = 0x1b0,
	.core_dll_config_2 = 0x1b4,
	.core_ddr_config_old = 0x1b8,
	.core_ddr_config = 0x1bc,
};
0239
/*
 * Register-access indirection: the MCI variant accesses the core
 * registers through the separate core_mem mapping, V5 through the
 * regular SDHCI ioaddr.  Used via the msm_host_readl/msm_host_writel
 * macros above.
 */
struct sdhci_msm_variant_ops {
	u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
	void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host,
			u32 offset);
};
0245
0246
0247
0248
0249
/*
 * Per-compatible match data: which register layout and access ops a
 * given controller generation uses, and whether the SDR DLL config
 * must be restored after runtime resume.
 */
struct sdhci_msm_variant_info {
	bool mci_removed;		/* true on V5+: no separate MCI region */
	bool restore_dll_config;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
};
0256
/* Driver-private state, stored in the sdhci_pltfm private area. */
struct sdhci_msm_host {
	struct platform_device *pdev;
	void __iomem *core_mem;		/* MCI-variant core register region */
	void __iomem *ice_mem;
	int pwr_irq;			/* power/IO-level change IRQ */
	struct clk *bus_clk;
	struct clk *xo_clk;		/* TCXO; used to derive the FLL cycle count */
	/* core, iface, and optional extra clocks handled as a bulk set */
	struct clk_bulk_data bulk_clks[5];
	unsigned long clk_rate;		/* last rate requested via OPP */
	struct mmc_host *mmc;
	bool use_14lpp_dll_reset;	/* 14lpp-style DLL reset sequence needed */
	bool tuning_done;
	bool calibration_done;		/* HS400 DLL calibration completed */
	u8 saved_tuning_phase;		/* phase selected by the last tuning run */
	bool use_cdclp533;		/* use CDCLP533 calibration circuit for HS400 */
	u32 curr_pwr_state;
	u32 curr_io_level;
	wait_queue_head_t pwr_irq_wait;
	bool pwr_irq_flag;
	u32 caps_0;
	bool mci_removed;
	bool restore_dll_config;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
	bool use_cdr;			/* clock-data-recovery enabled for tuning modes */
	u32 transfer_mode;
	bool updated_ddr_cfg;		/* write DDR config to the new register offset */
	bool uses_tassadar_dll;
	u32 dll_config;			/* optional DT-provided DLL_CONFIG override */
	u32 ddr_config;
	bool vqmmc_enabled;
};
0290
0291 static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
0292 {
0293 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
0294 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
0295
0296 return msm_host->offset;
0297 }
0298
0299
0300
0301
0302
0303 static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
0304 u32 offset)
0305 {
0306 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
0307 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
0308
0309 return readl_relaxed(msm_host->core_mem + offset);
0310 }
0311
0312 static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host,
0313 u32 offset)
0314 {
0315 return readl_relaxed(host->ioaddr + offset);
0316 }
0317
0318 static void sdhci_msm_mci_variant_writel_relaxed(u32 val,
0319 struct sdhci_host *host, u32 offset)
0320 {
0321 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
0322 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
0323
0324 writel_relaxed(val, msm_host->core_mem + offset);
0325 }
0326
0327 static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
0328 struct sdhci_host *host, u32 offset)
0329 {
0330 writel_relaxed(val, host->ioaddr + offset);
0331 }
0332
0333 static unsigned int msm_get_clock_mult_for_bus_mode(struct sdhci_host *host)
0334 {
0335 struct mmc_ios ios = host->mmc->ios;
0336
0337
0338
0339
0340
0341
0342 if (ios.timing == MMC_TIMING_UHS_DDR50 ||
0343 ios.timing == MMC_TIMING_MMC_DDR52 ||
0344 ios.timing == MMC_TIMING_MMC_HS400 ||
0345 host->flags & SDHCI_HS400_TUNING)
0346 return 2;
0347 return 1;
0348 }
0349
0350 static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
0351 unsigned int clock)
0352 {
0353 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
0354 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
0355 struct mmc_ios curr_ios = host->mmc->ios;
0356 struct clk *core_clk = msm_host->bulk_clks[0].clk;
0357 unsigned long achieved_rate;
0358 unsigned int desired_rate;
0359 unsigned int mult;
0360 int rc;
0361
0362 mult = msm_get_clock_mult_for_bus_mode(host);
0363 desired_rate = clock * mult;
0364 rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), desired_rate);
0365 if (rc) {
0366 pr_err("%s: Failed to set clock at rate %u at timing %d\n",
0367 mmc_hostname(host->mmc), desired_rate, curr_ios.timing);
0368 return;
0369 }
0370
0371
0372
0373
0374
0375
0376 achieved_rate = clk_get_rate(core_clk);
0377 if (achieved_rate > desired_rate)
0378 pr_warn("%s: Card appears overclocked; req %u Hz, actual %lu Hz\n",
0379 mmc_hostname(host->mmc), desired_rate, achieved_rate);
0380 host->mmc->actual_clock = achieved_rate / mult;
0381
0382
0383 msm_host->clk_rate = desired_rate;
0384
0385 pr_debug("%s: Setting clock at rate %lu at timing %d\n",
0386 mmc_hostname(host->mmc), achieved_rate, curr_ios.timing);
0387 }
0388
0389
0390 static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
0391 {
0392 u32 wait_cnt = 50;
0393 u8 ck_out_en;
0394 struct mmc_host *mmc = host->mmc;
0395 const struct sdhci_msm_offset *msm_offset =
0396 sdhci_priv_msm_offset(host);
0397
0398
0399 ck_out_en = !!(readl_relaxed(host->ioaddr +
0400 msm_offset->core_dll_config) & CORE_CK_OUT_EN);
0401
0402 while (ck_out_en != poll) {
0403 if (--wait_cnt == 0) {
0404 dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
0405 mmc_hostname(mmc), poll);
0406 return -ETIMEDOUT;
0407 }
0408 udelay(1);
0409
0410 ck_out_en = !!(readl_relaxed(host->ioaddr +
0411 msm_offset->core_dll_config) & CORE_CK_OUT_EN);
0412 }
0413
0414 return 0;
0415 }
0416
/*
 * Select one of the 16 DLL output phases for the CDR so commands/data
 * are sampled at @phase.  The phase value is grey-coded before being
 * written to the CDR_SELEXT field.  Runs under host->lock since it
 * toggles CK_OUT_EN around the phase change.
 *
 * Returns 0 on success, -EINVAL for phase > 0xf, or -ETIMEDOUT if
 * CK_OUT_EN fails to toggle.
 */
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc;
	static const u8 grey_coded_phase_table[] = {
		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
	};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;
	const struct sdhci_msm_offset *msm_offset =
		sdhci_priv_msm_offset(host);

	if (phase > 0xf)
		return -EINVAL;

	spin_lock_irqsave(&host->lock, flags);

	/* Take external control of the CDR and gate the output clock. */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN reads back as 0 before changing the phase. */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/* Program the grey-coded phase into the CDR_SELEXT field. */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~CDR_SELEXT_MASK;
	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Re-enable the output clock with the new phase selected. */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN reads back as 1. */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	/* Hand control back to the CDR hardware. */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
	goto out;

err_out:
	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return rc;
}
0476
0477
0478
0479
0480
0481
0482
0483
0484
0485
0486
/*
 * Given the list of phases that passed tuning (@phase_table, sorted
 * ascending), pick the phase to use: group the passing phases into
 * runs of consecutive values, merge the run ending at 15 with the run
 * starting at 0 (the phase space wraps around), then return the value
 * at roughly the 3/4 position of the longest run.
 *
 * Returns the chosen phase (0..15) or a negative errno on bad input.
 */
static int msm_find_most_appropriate_phase(struct sdhci_host *host,
					   u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = { 0 };
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	if (!total_phases || (total_phases > MAX_PHASES)) {
		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), total_phases);
		return -EINVAL;
	}

	/* Split the sorted phase list into rows of consecutive phases. */
	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* A gap in the sequence starts a new row. */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* If phase 0 passed, look for a row that ends at phase 15. */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if phase-15 is present in any of the rows. */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* The phase space wraps: merge the 15-run and the 0-run. */
	if (phase_0_found && phase_15_found) {
		/* number of phases in the row containing phase-0 */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in the row containing phase-15 */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If all or nearly all phases passed, the window has
			 * no meaningful edges — treat it as invalid rather
			 * than picking an arbitrary phase.
			 */
			return -EINVAL;

		/* Append the phase-0 run after the phase-15 run. */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	/* Find the longest run of consecutive passing phases. */
	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	/* Pick the value ~3/4 of the way into the longest run. */
	i = (curr_max * 3) / 4;
	if (i)
		i--;

	ret = ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
			mmc_hostname(mmc), ret);
	}

	return ret;
}
0588
0589 static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
0590 {
0591 u32 mclk_freq = 0, config;
0592 const struct sdhci_msm_offset *msm_offset =
0593 sdhci_priv_msm_offset(host);
0594
0595
0596 if (host->clock <= 112000000)
0597 mclk_freq = 0;
0598 else if (host->clock <= 125000000)
0599 mclk_freq = 1;
0600 else if (host->clock <= 137000000)
0601 mclk_freq = 2;
0602 else if (host->clock <= 150000000)
0603 mclk_freq = 3;
0604 else if (host->clock <= 162000000)
0605 mclk_freq = 4;
0606 else if (host->clock <= 175000000)
0607 mclk_freq = 5;
0608 else if (host->clock <= 187000000)
0609 mclk_freq = 6;
0610 else if (host->clock <= 200000000)
0611 mclk_freq = 7;
0612
0613 config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
0614 config &= ~CMUX_SHIFT_PHASE_MASK;
0615 config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
0616 writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
0617 }
0618
0619
/*
 * Initialize and lock the CM DLL used for tuning.  The sequence
 * (reset, power-down, frequency programming, release, enable, poll for
 * lock) is order-sensitive; it runs under host->lock with interrupts
 * off.  Returns 0 once DLL_LOCK is observed, -ETIMEDOUT otherwise.
 */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int wait_cnt = 50;
	unsigned long flags, xo_clk = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
		msm_host->offset;

	/* The TCXO rate is needed below to compute the FLL cycle count. */
	if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
		xo_clk = clk_get_rate(msm_host->xo_clk);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Disable clock power-save while the DLL is being initialized; a
	 * gated clock during this sequence could leave the DLL unlocked
	 * (NOTE(review): hardware rationale assumed from the sequence —
	 * confirm against the SDCC HPG).
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_CLK_PWRSAVE;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/* Apply the DT-provided DLL_CONFIG override, if any. */
	if (msm_host->dll_config)
		writel_relaxed(msm_host->dll_config,
				host->ioaddr + msm_offset->core_dll_config);

	/* 14lpp parts: gate the output clock and the DLL clock first. */
	if (msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config &= ~CORE_CK_OUT_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config |= CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	/* Put the DLL into reset ... */
	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* ... and power it down while it is reprogrammed. */
	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* Without a DT override, derive the frequency code from host->clock. */
	if (!msm_host->dll_config)
		msm_cm_dll_set_freq(host);

	if (msm_host->use_14lpp_dll_reset &&
	    !IS_ERR_OR_NULL(msm_host->xo_clk)) {
		u32 mclk_freq = 0;

		/*
		 * FLL cycle count: 8 or 4 MCLK cycles per TCXO cycle
		 * depending on the CORE_FLL_CYCLE_CNT setting.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= CORE_FLL_CYCLE_CNT;
		if (config)
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
					xo_clk);
		else
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
					xo_clk);

		/* Program the count into bits [17:10] of DLL_CONFIG_2. */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~(0xFF << 10);
		config |= mclk_freq << 10;

		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
		/* Brief settle time before releasing the DLL from reset. */
		udelay(5);
	}

	/* Release reset ... */
	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* ... and power the DLL back up. */
	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* 14lpp parts: re-enable the DLL clock gated above. */
	if (msm_host->use_14lpp_dll_reset) {
		if (!msm_host->dll_config)
			msm_cm_dll_set_freq(host);
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	/*
	 * Tassadar DLL: program DLL_USR_CTL and the frequency-dependent
	 * DLL_CONFIG_3 value (low/high band split at 150 MHz).
	 */
	if (msm_host->uses_tassadar_dll) {
		config = DLL_USR_CTL_POR_VAL | FINE_TUNE_MODE_EN |
			ENABLE_DLL_LOCK_STATUS | BIAS_OK_SIGNAL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_usr_ctl);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_3);
		config &= ~0xFF;
		if (msm_host->clk_rate < 150000000)
			config |= DLL_CONFIG_3_LOW_FREQ_VAL;
		else
			config |= DLL_CONFIG_3_HIGH_FREQ_VAL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_3);
	}

	/* Enable the DLL ... */
	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* ... and its output clock. */
	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* Wait (up to ~50us) for the DLL to report lock. */
	while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
		 CORE_DLL_LOCK)) {
		/* max. wait for 50us sec for LOCK bit to be set */
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
				mmc_hostname(mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return -ETIMEDOUT;
		}
		udelay(1);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	return 0;
}
0774
/*
 * Switch the controller back to the default (non-HS400) MCLK selection
 * and let the standard Host Control2 UHS mode selection take effect.
 */
static void msm_hc_select_default(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
		msm_host->offset;

	/* PWRSAVE_DLL is only used with the CDCLP533 circuit; clear it here. */
	if (!msm_host->use_cdclp533) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config &= ~CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	/* Select the default MCLK source. */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_DFLT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Disable the HC_SELECT_IN override so mode selection comes from
	 * the standard Host Control2 register again.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_SELECT_IN_EN;
	config &= ~CORE_HC_SELECT_IN_MASK;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Make sure all the above relaxed register writes reach the device
	 * before any subsequent accesses.
	 */
	wmb();
}
0814
/*
 * Switch the controller's clock mux and mode-select override into
 * HS400 operation.
 */
static void msm_hc_select_hs400(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios ios = host->mmc->ios;
	u32 config, dll_lock;
	int rc;
	const struct sdhci_msm_offset *msm_offset =
		msm_host->offset;

	/* Select the HS400 MCLK source. */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_HS400;

	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Force HS400 through HC_SELECT_IN once tuning is done (or with
	 * enhanced strobe) but before DLL calibration has completed.
	 */
	if ((msm_host->tuning_done || ios.enhanced_strobe) &&
	    !msm_host->calibration_done) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_HC_SELECT_IN_HS400;
		config |= CORE_HC_SELECT_IN_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec);
	}
	if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
		/*
		 * Poll for the DLL (or DDR DLL) lock before proceeding.
		 * NOTE(review): this branch only triggers when clk_rate is
		 * still 0 (no rate set yet) — confirm that is intentional.
		 */
		rc = readl_relaxed_poll_timeout(host->ioaddr +
				msm_offset->core_dll_status,
				dll_lock,
				(dll_lock &
				(CORE_DLL_LOCK |
				CORE_DDR_DLL_LOCK)), 10,
				1000);
		if (rc == -ETIMEDOUT)
			pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
			       mmc_hostname(host->mmc), dll_lock);
	}
	/*
	 * Make sure all the above relaxed register writes reach the device
	 * before any subsequent accesses.
	 */
	wmb();
}
0867
0868
0869
0870
0871
0872
0873
0874
0875
0876
0877
0878
0879
0880
0881
0882
0883
0884
0885
0886 static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
0887 {
0888 struct mmc_ios ios = host->mmc->ios;
0889
0890 if (ios.timing == MMC_TIMING_MMC_HS400 ||
0891 host->flags & SDHCI_HS400_TUNING)
0892 msm_hc_select_hs400(host);
0893 else
0894 msm_hc_select_default(host);
0895 }
0896
/*
 * Run the CDCLP533 calibration sequence used for HS400 on controllers
 * with the CDC circuit: re-init the DLL at the saved tuning phase,
 * program the CDC configuration registers, trigger a full calibration
 * and poll for completion, then restart CDC traffic.
 *
 * Returns 0 on success, -ETIMEDOUT if calibration never completes, or
 * -EINVAL if the CDC reports an error code.
 */
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config, calib_done;
	int ret;
	const struct sdhci_msm_offset *msm_offset =
		msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/* Re-initialize the DLL before calibrating. */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the phase found during tuning. */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CMD_DAT_TRACK_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_CDC_T4_DLY_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config |= CORE_CDC_SWITCH_RC_EN;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Stop CDC traffic while the calibration values are programmed. */
	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	/*
	 * CDC register initialization values.  NOTE(review): these magic
	 * constants are hardware-specified calibration parameters —
	 * presumably from the Qualcomm SDCC hardware programming guide.
	 */
	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* Pulse SW_TRIG_FULL_CALIB to kick off a full calibration cycle. */
	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config &= ~CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	/* Enable hardware auto-calibration and its timer. */
	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_HW_AUTOCAL_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	config |= CORE_TIMER_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	/* Poll (up to 50us) for calibration completion. */
	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
					 calib_done,
					 (calib_done & CORE_CALIBRATION_DONE),
					 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* Any non-zero error code means calibration failed. */
	ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (ret) {
		pr_err("%s: %s: CDC error code %d\n",
		       mmc_hostname(host->mmc), __func__, ret);
		ret = -EINVAL;
		goto out;
	}

	/* Restart CDC traffic now that calibration succeeded. */
	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config |= CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
0999
/*
 * Run the CM DLL SDC4 (DDR) calibration used for HS400 on controllers
 * without the CDCLP533 circuit: program the DDR CONFIG value, enable
 * DDR calibration and poll for DDR_DLL_LOCK.
 *
 * Returns 0 on success or -ETIMEDOUT if the DDR DLL never locks.
 */
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	u32 dll_status, config, ddr_cfg_offset;
	int ret;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
		sdhci_priv_msm_offset(host);

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Newer controllers moved the DDR CONFIG register; pick the offset
	 * matching this controller generation before writing the
	 * (possibly DT-overridden) ddr_config value.
	 */
	if (msm_host->updated_ddr_cfg)
		ddr_cfg_offset = msm_offset->core_ddr_config;
	else
		ddr_cfg_offset = msm_offset->core_ddr_config_old;
	writel_relaxed(msm_host->ddr_config, host->ioaddr + ddr_cfg_offset);

	/* With enhanced strobe, route RCLK through the CMD input. */
	if (mmc->ios.enhanced_strobe) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_ddr_200_cfg);
		config |= CORE_CMDIN_RCLK_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_ddr_200_cfg);
	}

	/* Kick off DDR calibration. */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
	config |= CORE_DDR_CAL_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);

	/* Poll (up to 1ms) for the DDR DLL to lock. */
	ret = readl_relaxed_poll_timeout(host->ioaddr +
					 msm_offset->core_dll_status,
					 dll_status,
					 (dll_status & CORE_DDR_DLL_LOCK),
					 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * PWRSAVE_DLL is only enabled on controllers without the 14lpp
	 * DLL reset sequence (NOTE(review): hardware rationale assumed —
	 * confirm against the SDCC documentation).
	 */
	if (!msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config |= CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	/*
	 * Make sure all the above relaxed register writes reach the device
	 * before any subsequent accesses.
	 */
	wmb();
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
1075
/*
 * Top-level HS400 DLL calibration: re-init the tuning DLL, restore the
 * saved tuning phase (unless enhanced strobe is in use, where no
 * tuning phase exists), then run whichever calibration circuit this
 * controller has (CDCLP533 or CM DLL SDC4).
 */
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
		msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/* Re-initialize the DLL before calibrating. */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	if (!mmc->ios.enhanced_strobe) {
		/* Set the phase found during tuning. */
		ret = msm_config_cm_dll_phase(host,
					      msm_host->saved_tuning_phase);
		if (ret)
			goto out;
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_CMD_DAT_TRACK_SEL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);
	}

	/* Dispatch to the calibration circuit present on this controller. */
	if (msm_host->use_cdclp533)
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
1118
1119 static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host)
1120 {
1121 struct mmc_ios *ios = &host->mmc->ios;
1122
1123
1124
1125
1126
1127 if (host->clock <= CORE_FREQ_100MHZ ||
1128 !(ios->timing == MMC_TIMING_MMC_HS400 ||
1129 ios->timing == MMC_TIMING_MMC_HS200 ||
1130 ios->timing == MMC_TIMING_UHS_SDR104) ||
1131 ios->enhanced_strobe)
1132 return false;
1133
1134 return true;
1135 }
1136
1137 static int sdhci_msm_restore_sdr_dll_config(struct sdhci_host *host)
1138 {
1139 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1140 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1141 int ret;
1142
1143
1144
1145
1146
1147 if (!sdhci_msm_is_tuning_needed(host))
1148 return 0;
1149
1150
1151 ret = msm_init_cm_dll(host);
1152 if (ret)
1153 return ret;
1154
1155
1156 ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
1157
1158 return ret;
1159 }
1160
1161 static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
1162 {
1163 const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
1164 u32 config, oldconfig = readl_relaxed(host->ioaddr +
1165 msm_offset->core_dll_config);
1166
1167 config = oldconfig;
1168 if (enable) {
1169 config |= CORE_CDR_EN;
1170 config &= ~CORE_CDR_EXT_EN;
1171 } else {
1172 config &= ~CORE_CDR_EN;
1173 config |= CORE_CDR_EXT_EN;
1174 }
1175
1176 if (config != oldconfig) {
1177 writel_relaxed(config, host->ioaddr +
1178 msm_offset->core_dll_config);
1179 }
1180 }
1181
/*
 * Platform tuning for HS200/HS400/SDR104: sweep all 16 DLL phases with
 * the tuning command, pick the most appropriate passing phase, and
 * program it.  The whole sequence is retried up to 10 times when no
 * usable phase is found, or when every phase passes (treated as an
 * unreliable result).
 */
static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int tuning_seq_cnt = 10;
	u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
	int rc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (!sdhci_msm_is_tuning_needed(host)) {
		msm_host->use_cdr = false;
		sdhci_msm_set_cdr(host, false);
		return 0;
	}

	/* Clock-Data-Recovery is used to dynamically adjust RX sampling */
	msm_host->use_cdr = true;

	/*
	 * Clear tuning_done before tuning so HS400 calibration will be run
	 * again against the freshly tuned phase.
	 */
	msm_host->tuning_done = 0;

	/*
	 * For HS400 tuning (performed in HS200 timing), reselect the HC
	 * mode and reprogram the bus clock before sweeping phases.
	 */
	if (host->flags & SDHCI_HS400_TUNING) {
		sdhci_msm_hc_select_mode(host);
		msm_set_clock_rate_for_bus_mode(host, ios.clock);
		host->flags &= ~SDHCI_HS400_TUNING;
	}

retry:
	/* First of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		return rc;

	phase = 0;
	do {
		/* Set the phase in the delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;

		rc = mmc_send_tuning(mmc, opcode, NULL);
		if (!rc) {
			/* Tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
				 mmc_hostname(mmc), phase);
		}
	} while (++phase < ARRAY_SIZE(tuned_phases));

	if (tuned_phase_cnt) {
		if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) {
			/*
			 * All phases passing is almost as suspicious as none
			 * passing: the unreliable region was not detected, so
			 * any choice would be a guess.  Retry the sweep a few
			 * times before accepting it.
			 */
			dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n",
				mmc_hostname(mmc));
			if (--tuning_seq_cnt) {
				tuned_phase_cnt = 0;
				goto retry;
			}
		}

		rc = msm_find_most_appropriate_phase(host, tuned_phases,
						     tuned_phase_cnt);
		if (rc < 0)
			return rc;
		else
			phase = rc;

		/*
		 * Finally program the selected phase in the delay line hw
		 * block and remember it for later HS400 calibration.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;
		msm_host->saved_tuning_phase = phase;
		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
			 mmc_hostname(mmc), phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* Tuning failed */
		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
		       mmc_hostname(mmc));
		rc = -EIO;
	}

	if (!rc)
		msm_host->tuning_done = true;
	return rc;
}
1287
1288
1289
1290
1291
1292
1293
1294 static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
1295 {
1296 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1297 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1298 int ret;
1299
1300 if (host->clock > CORE_FREQ_100MHZ &&
1301 (msm_host->tuning_done || ios->enhanced_strobe) &&
1302 !msm_host->calibration_done) {
1303 ret = sdhci_msm_hs400_dll_calibration(host);
1304 if (!ret)
1305 msm_host->calibration_done = true;
1306 else
1307 pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
1308 mmc_hostname(host->mmc), ret);
1309 }
1310 }
1311
/*
 * Program the UHS mode select field of HOST_CONTROL2 for the given MMC
 * timing.  Below 100 MHz the controller must not run in SDR104-class
 * modes and the DLL is reset/powered down, since it is not usable at
 * such clocks; HS400 switches additionally trigger DLL calibration.
 */
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u16 ctrl_2;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	switch (uhs) {
	case MMC_TIMING_UHS_SDR12:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
		break;
	case MMC_TIMING_MMC_HS400:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_UHS_SDR104:
		/* HS400/HS200/SDR104 all use the SDR104 host mode */
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
		break;
	}

	/*
	 * When the clock is at or below 100 MHz, tuning is skipped and the
	 * DLL must not be used, so do not advertise an SDR104-class mode in
	 * HOST_CONTROL2 and put the DLL into reset/power-down.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if (uhs == MMC_TIMING_MMC_HS400 ||
		    uhs == MMC_TIMING_MMC_HS200 ||
		    uhs == MMC_TIMING_UHS_SDR104)
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
		/*
		 * DLL is not required for clock <= 100MHz; make sure it is
		 * disabled when not required.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_RST;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_PDN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		/*
		 * Force fresh HS400 calibration next time the clock goes
		 * back above 100 MHz.
		 */
		msm_host->calibration_done = false;
	}

	dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
		mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

	if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
		sdhci_msm_hs400(host, &mmc->ios);
}
1388
1389 static int sdhci_msm_set_pincfg(struct sdhci_msm_host *msm_host, bool level)
1390 {
1391 struct platform_device *pdev = msm_host->pdev;
1392 int ret;
1393
1394 if (level)
1395 ret = pinctrl_pm_select_default_state(&pdev->dev);
1396 else
1397 ret = pinctrl_pm_select_sleep_state(&pdev->dev);
1398
1399 return ret;
1400 }
1401
1402 static int sdhci_msm_set_vmmc(struct mmc_host *mmc)
1403 {
1404 if (IS_ERR(mmc->supply.vmmc))
1405 return 0;
1406
1407 return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd);
1408 }
1409
1410 static int msm_toggle_vqmmc(struct sdhci_msm_host *msm_host,
1411 struct mmc_host *mmc, bool level)
1412 {
1413 int ret;
1414 struct mmc_ios ios;
1415
1416 if (msm_host->vqmmc_enabled == level)
1417 return 0;
1418
1419 if (level) {
1420
1421 if (msm_host->caps_0 & CORE_3_0V_SUPPORT)
1422 ios.signal_voltage = MMC_SIGNAL_VOLTAGE_330;
1423 else if (msm_host->caps_0 & CORE_1_8V_SUPPORT)
1424 ios.signal_voltage = MMC_SIGNAL_VOLTAGE_180;
1425
1426 if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
1427 ret = mmc_regulator_set_vqmmc(mmc, &ios);
1428 if (ret < 0) {
1429 dev_err(mmc_dev(mmc), "%s: vqmmc set volgate failed: %d\n",
1430 mmc_hostname(mmc), ret);
1431 goto out;
1432 }
1433 }
1434 ret = regulator_enable(mmc->supply.vqmmc);
1435 } else {
1436 ret = regulator_disable(mmc->supply.vqmmc);
1437 }
1438
1439 if (ret)
1440 dev_err(mmc_dev(mmc), "%s: vqmm %sable failed: %d\n",
1441 mmc_hostname(mmc), level ? "en":"dis", ret);
1442 else
1443 msm_host->vqmmc_enabled = level;
1444 out:
1445 return ret;
1446 }
1447
1448 static int msm_config_vqmmc_mode(struct sdhci_msm_host *msm_host,
1449 struct mmc_host *mmc, bool hpm)
1450 {
1451 int load, ret;
1452
1453 load = hpm ? MMC_VQMMC_MAX_LOAD_UA : 0;
1454 ret = regulator_set_load(mmc->supply.vqmmc, load);
1455 if (ret)
1456 dev_err(mmc_dev(mmc), "%s: vqmmc set load failed: %d\n",
1457 mmc_hostname(mmc), ret);
1458 return ret;
1459 }
1460
1461 static int sdhci_msm_set_vqmmc(struct sdhci_msm_host *msm_host,
1462 struct mmc_host *mmc, bool level)
1463 {
1464 int ret;
1465 bool always_on;
1466
1467 if (IS_ERR(mmc->supply.vqmmc) ||
1468 (mmc->ios.power_mode == MMC_POWER_UNDEFINED))
1469 return 0;
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480 always_on = !mmc_card_is_removable(mmc) &&
1481 mmc->card && mmc_card_mmc(mmc->card);
1482
1483 if (always_on)
1484 ret = msm_config_vqmmc_mode(msm_host, mmc, level);
1485 else
1486 ret = msm_toggle_vqmmc(msm_host, mmc, level);
1487
1488 return ret;
1489 }
1490
/* Initialise the waitqueue used to wait for power IRQ completion */
static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
{
	init_waitqueue_head(&msm_host->pwr_irq_wait);
}
1495
/* Wake any thread blocked in sdhci_msm_check_power_status() */
static inline void sdhci_msm_complete_pwr_irq_wait(
		struct sdhci_msm_host *msm_host)
{
	wake_up(&msm_host->pwr_irq_wait);
}
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
/*
 * sdhci_msm_check_power_status - wait for a power IRQ to be serviced
 * @host: SDHCI host
 * @req_type: REQ_BUS_ON/OFF and/or REQ_IO_HIGH/LOW bits being waited for
 *
 * Register writes that change bus power or IO level raise a power IRQ
 * that must be handled before further progress.  This waits (with a
 * timeout) for the handler, except in cases where no IRQ will ever be
 * generated.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	bool done = false;
	u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
			mmc_hostname(host->mmc), __func__, req_type,
			msm_host->curr_pwr_state, msm_host->curr_io_level);

	/*
	 * No power IRQ is generated for IO voltage switches unless
	 * SWITCHABLE_SIGNALING_VOLTAGE is set in MCI_GENERICS.  On hosts
	 * where the MCI block was removed this register does not exist,
	 * so 'val' keeps its default (bit set) and the check passes.
	 */
	if (!msm_host->mci_removed)
		val = msm_host_readl(msm_host, host,
				msm_offset->core_generics);
	if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
	    !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
		return;
	}

	/*
	 * An IO HIGH request issued before the controller has been powered
	 * up (host->pwr == 0) causes no state change in the 1.8V enable bit
	 * of HOST_CONTROL2 (its reset value already indicates 3.3V), so no
	 * IRQ would ever arrive - do not wait for one.
	 */
	if ((req_type & REQ_IO_HIGH) && !host->pwr) {
		pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
				mmc_hostname(host->mmc), req_type);
		return;
	}
	/*
	 * If the controller is already in the requested bus state or IO
	 * level, the write changes nothing and no IRQ will be raised.
	 */
	if ((req_type & msm_host->curr_pwr_state) ||
			(req_type & msm_host->curr_io_level))
		done = true;

	/*
	 * Otherwise wait for the IRQ handler to flag completion; warn
	 * (but continue) if it never does within the timeout.
	 */
	if (!done) {
		if (!wait_event_timeout(msm_host->pwr_irq_wait,
				msm_host->pwr_irq_flag,
				msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
			dev_warn(&msm_host->pdev->dev,
				 "%s: pwr_irq for req: (%d) timed out\n",
				 mmc_hostname(host->mmc), req_type);
	}
	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
			__func__, req_type);
}
1575
/* Dump the three PWRCTL registers for power IRQ debugging */
static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
			mmc_hostname(host->mmc),
			msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
			msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
			msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));
}
1589
/*
 * Service a power control request from the controller: read and clear the
 * PWRCTL status, perform the requested bus power and/or IO level change
 * (regulators + pinctrl), acknowledge the request via PWRCTL_CTL, and
 * update the cached power/IO state.
 */
static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	u32 irq_status, irq_ack = 0;
	int retry = 10, ret;
	u32 pwr_state = 0, io_level = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	irq_status = msm_host_readl(msm_host, host,
			msm_offset->core_pwrctl_status);
	irq_status &= INT_MASK;

	msm_host_writel(msm_host, irq_status, host,
			msm_offset->core_pwrctl_clear);

	/*
	 * The clear pulse can occasionally be lost by the hardware when the
	 * clear and the status update race, leaving the status bits set and
	 * causing a spurious power IRQ.  Re-issue the clear (up to 10 times)
	 * until the status register reads back clean.
	 */
	while (irq_status & msm_host_readl(msm_host, host,
				msm_offset->core_pwrctl_status)) {
		if (retry == 0) {
			pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
					mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			WARN_ON(1);
			break;
		}
		msm_host_writel(msm_host, irq_status, host,
				msm_offset->core_pwrctl_clear);
		retry--;
		udelay(10);
	}

	/* Handle BUS ON/OFF (which implies an IO level too) */
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
	}

	if (pwr_state) {
		/* vmmc, then vqmmc, then pinctrl - ack success only if all pass */
		ret = sdhci_msm_set_vmmc(mmc);
		if (!ret)
			ret = sdhci_msm_set_vqmmc(msm_host, mmc,
					pwr_state & REQ_BUS_ON);
		if (!ret)
			ret = sdhci_msm_set_pincfg(msm_host,
					pwr_state & REQ_BUS_ON);
		if (!ret)
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
		else
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
	}

	/* Handle standalone IO LOW/HIGH requests */
	if (irq_status & CORE_PWRCTL_IO_LOW)
		io_level = REQ_IO_LOW;

	if (irq_status & CORE_PWRCTL_IO_HIGH)
		io_level = REQ_IO_HIGH;

	if (io_level)
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;

	/* For pure IO level changes, also retarget vqmmc to ios settings */
	if (io_level && !IS_ERR(mmc->supply.vqmmc) && !pwr_state) {
		ret = mmc_regulator_set_vqmmc(mmc, &mmc->ios);
		if (ret < 0) {
			dev_err(mmc_dev(mmc), "%s: IO_level setting failed(%d). signal_voltage: %d, vdd: %d irq_status: 0x%08x\n",
				mmc_hostname(mmc), ret,
				mmc->ios.signal_voltage, mmc->ios.vdd,
				irq_status);
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		}
	}

	/*
	 * The driver must acknowledge the interrupt and report success or
	 * failure back through PWRCTL_CTL before the controller proceeds.
	 */
	msm_host_writel(msm_host, irq_ack, host,
			msm_offset->core_pwrctl_ctl);

	/*
	 * Only touch the IO PAD power switch if the supported regulator
	 * voltage ranges are actually known (CORE_VOLT_SUPPORT).
	 */
	if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
		u32 new_config;

		/*
		 * Clear the IO PAD PWR switch (3 V pads) only when IO goes
		 * high AND the regulator supports 3 V; otherwise keep/set it
		 * (1.8 V pads).  This keeps the pad voltage consistent with
		 * the actual regulator output on 1.8V-only targets even
		 * during a REQ_IO_HIGH.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		new_config = config;

		if ((io_level & REQ_IO_HIGH) &&
				(msm_host->caps_0 & CORE_3_0V_SUPPORT))
			new_config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) ||
				(msm_host->caps_0 & CORE_1_8V_SUPPORT))
			new_config |= CORE_IO_PAD_PWR_SWITCH;

		if (config ^ new_config)
			writel_relaxed(new_config, host->ioaddr +
					msm_offset->core_vendor_spec);
	}

	/* Cache the new state for sdhci_msm_check_power_status() */
	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;

	dev_dbg(mmc_dev(mmc), "%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
		mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
		irq_ack);
}
1725
1726 static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
1727 {
1728 struct sdhci_host *host = (struct sdhci_host *)data;
1729 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1730 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1731
1732 sdhci_msm_handle_pwr_irq(host, irq);
1733 msm_host->pwr_irq_flag = 1;
1734 sdhci_msm_complete_pwr_irq_wait(msm_host);
1735
1736
1737 return IRQ_HANDLED;
1738 }
1739
1740 static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
1741 {
1742 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1743 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1744 struct clk *core_clk = msm_host->bulk_clks[0].clk;
1745
1746 return clk_round_rate(core_clk, ULONG_MAX);
1747 }
1748
/* Report the fixed minimum bus clock for this controller */
static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
{
	return SDHCI_MSM_MIN_CLOCK;
}
1753
1754
1755
1756
1757
1758
1759
1760
1761
/*
 * Gate/ungate the SD clock in SDHCI_CLOCK_CONTROL.  The actual clock
 * rate is programmed through the GCC clocks (msm_set_clock_rate_for_
 * bus_mode), so no SDHCI divider is configured here - the register is
 * cleared and then only the enable bits are set via sdhci_enable_clk().
 */
static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	/* Gate the clock first; for clock == 0 that is all we do */
	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	/*
	 * Re-read the (now cleared) register and re-enable the clock.
	 * No divider bits are set - the rate comes from the GCC.
	 */
	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	sdhci_enable_clk(host, clk);
}
1779
1780
1781 static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
1782 {
1783 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1784 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1785
1786 if (!clock) {
1787 host->mmc->actual_clock = msm_host->clk_rate = 0;
1788 goto out;
1789 }
1790
1791 sdhci_msm_hc_select_mode(host);
1792
1793 msm_set_clock_rate_for_bus_mode(host, clock);
1794 out:
1795 __sdhci_msm_set_clock(host, clock);
1796 }
1797
1798
1799
1800
1801
1802
1803
#ifdef CONFIG_MMC_CRYPTO

/* Key size used by the only supported algorithm, AES-256-XTS */
#define AES_256_XTS_KEY_SIZE			64

/* QCOM ICE (Inline Crypto Engine) registers */

#define QCOM_ICE_REG_VERSION			0x0008

/* Fuse bits that, when blown, make the hardware keys unusable */
#define QCOM_ICE_REG_FUSE_SETTING		0x0010
#define QCOM_ICE_FUSE_SETTING_MASK		0x1
#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK	0x2
#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK	0x4

/* Built-in self-test status; nonzero (masked) bits mean BIST still running */
#define QCOM_ICE_REG_BIST_STATUS		0x0070
#define QCOM_ICE_BIST_STATUS_MASK		0xF0000000

#define QCOM_ICE_REG_ADVANCED_CONTROL		0x1000

/* ICE registers are mapped separately from the SDHCI registers (ice_mem) */
#define sdhci_msm_ice_writel(host, val, reg)	\
	writel((val), (host)->ice_mem + (reg))
#define sdhci_msm_ice_readl(host, reg)	\
	readl((host)->ice_mem + (reg))
1826
1827 static bool sdhci_msm_ice_supported(struct sdhci_msm_host *msm_host)
1828 {
1829 struct device *dev = mmc_dev(msm_host->mmc);
1830 u32 regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_VERSION);
1831 int major = regval >> 24;
1832 int minor = (regval >> 16) & 0xFF;
1833 int step = regval & 0xFFFF;
1834
1835
1836 if (major != 3) {
1837 dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n",
1838 major, minor, step);
1839 return false;
1840 }
1841
1842 dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n",
1843 major, minor, step);
1844
1845
1846 regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_FUSE_SETTING);
1847 if (regval & (QCOM_ICE_FUSE_SETTING_MASK |
1848 QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK |
1849 QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) {
1850 dev_warn(dev, "Fuses are blown; ICE is unusable!\n");
1851 return false;
1852 }
1853 return true;
1854 }
1855
/* Get the "ice" clock; managed (devm), so no explicit put is needed */
static inline struct clk *sdhci_msm_ice_get_clk(struct device *dev)
{
	return devm_clk_get(dev, "ice");
}
1860
/*
 * Probe and map the Inline Crypto Engine.  MMC_CAP2_CRYPTO is set only
 * when the controller advertises crypto support (CQHCI_CAP_CS), the
 * "ice" register resource exists, the SCM key interface is available,
 * and the ICE version/fuse checks pass.  All soft failures simply
 * disable inline encryption and return 0.
 */
static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
			      struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = msm_host->mmc;
	struct device *dev = mmc_dev(mmc);
	struct resource *res;

	if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
		return 0;

	res = platform_get_resource_byname(msm_host->pdev, IORESOURCE_MEM,
					   "ice");
	if (!res) {
		dev_warn(dev, "ICE registers not found\n");
		goto disable;
	}

	if (!qcom_scm_ice_available()) {
		dev_warn(dev, "ICE SCM interface not found\n");
		goto disable;
	}

	/* Mapping failure is a hard error (probe defer/fault), not a soft one */
	msm_host->ice_mem = devm_ioremap_resource(dev, res);
	if (IS_ERR(msm_host->ice_mem))
		return PTR_ERR(msm_host->ice_mem);

	if (!sdhci_msm_ice_supported(msm_host))
		goto disable;

	mmc->caps2 |= MMC_CAP2_CRYPTO;
	return 0;

disable:
	dev_warn(dev, "Disabling inline encryption support\n");
	return 0;
}
1897
/* Enable the ICE low power mode via ADVANCED_CONTROL */
static void sdhci_msm_ice_low_power_mode_enable(struct sdhci_msm_host *msm_host)
{
	u32 regval;

	regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_ADVANCED_CONTROL);
	/*
	 * NOTE(review): 0x7000 presumably sets the low-power-mode enable
	 * bits per the QC ICE hardware programming guide - confirm against
	 * the ICE HPG, as the bit meanings are not documented here.
	 */
	regval |= 0x7000;
	sdhci_msm_ice_writel(msm_host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
}
1910
/* ICE optimizations enable sequence */
static void sdhci_msm_ice_optimization_enable(struct sdhci_msm_host *msm_host)
{
	u32 regval;

	/*
	 * NOTE(review): 0xD807100 presumably enables the recommended
	 * performance optimizations from the QC ICE hardware programming
	 * guide; the short delays around the write appear to be required
	 * by the same guide - confirm both against the ICE HPG.
	 */
	regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_ADVANCED_CONTROL);
	regval |= 0xD807100;

	udelay(5);
	sdhci_msm_ice_writel(msm_host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
	udelay(5);
}
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
/*
 * Wait (poll up to 5 ms) until the ICE built-in self-test has finished,
 * i.e. the BIST status bits have cleared.  Only completion matters
 * here, not pass/fail: the hardware itself rejects crypto requests if
 * the BIST failed.  Returns 0 or -ETIMEDOUT.
 */
static int sdhci_msm_ice_wait_bist_status(struct sdhci_msm_host *msm_host)
{
	u32 regval;
	int err;

	err = readl_poll_timeout(msm_host->ice_mem + QCOM_ICE_REG_BIST_STATUS,
				 regval, !(regval & QCOM_ICE_BIST_STATUS_MASK),
				 50, 5000);
	if (err)
		dev_err(mmc_dev(msm_host->mmc),
			"Timed out waiting for ICE self-test to complete\n");
	return err;
}
1949
/*
 * Bring ICE into an operational state: low power mode, optimizations,
 * and a wait for the self-test.  No-op when crypto is not enabled.
 */
static void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
{
	if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO))
		return;
	sdhci_msm_ice_low_power_mode_enable(msm_host);
	sdhci_msm_ice_optimization_enable(msm_host);
	sdhci_msm_ice_wait_bist_status(msm_host);
}
1958
/*
 * On resume, wait for the ICE self-test to finish before crypto I/O.
 * No-op (0) when crypto is not enabled.
 */
static int __maybe_unused sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
{
	if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO))
		return 0;
	return sdhci_msm_ice_wait_bist_status(msm_host);
}
1965
1966
1967
1968
1969
/*
 * Program an inline-encryption key into an ICE keyslot - or invalidate
 * the slot when the config is disabled - via the SCM (secure world)
 * interface.  Only AES-256-XTS keys are supported.
 *
 * Returns 0 on success, -EINVAL for unsupported capabilities, or an
 * error from the SCM calls.
 */
static int sdhci_msm_program_key(struct cqhci_host *cq_host,
				 const union cqhci_crypto_cfg_entry *cfg,
				 int slot)
{
	struct device *dev = mmc_dev(cq_host->mmc);
	union cqhci_crypto_cap_entry cap;
	union {
		u8 bytes[AES_256_XTS_KEY_SIZE];
		u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)];
	} key;
	int i;
	int err;

	/* A disabled config entry means "invalidate this keyslot" */
	if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
		return qcom_scm_ice_invalidate_key(slot);

	/* Only AES-256-XTS is handled */
	cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
	if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
	    cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256) {
		dev_err_ratelimited(dev,
				    "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n",
				    cap.algorithm_id, cap.key_size);
		return -EINVAL;
	}

	memcpy(key.bytes, cfg->crypto_key, AES_256_XTS_KEY_SIZE);

	/*
	 * Convert each 32-bit word of the key to big endian before the SCM
	 * call - presumably the SCM interface byte-swaps the key words, so
	 * this pre-swap yields the correct final key (TODO confirm against
	 * the qcom_scm_ice_set_key contract).
	 */
	for (i = 0; i < ARRAY_SIZE(key.words); i++)
		__cpu_to_be32s(&key.words[i]);

	err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
				   QCOM_SCM_ICE_CIPHER_AES_256_XTS,
				   cfg->data_unit_size);
	/* Scrub the key copy from the stack */
	memzero_explicit(&key, sizeof(key));
	return err;
}
#else /* !CONFIG_MMC_CRYPTO */

/* Stubs used when inline crypto support is compiled out */

static inline struct clk *sdhci_msm_ice_get_clk(struct device *dev)
{
	return NULL;
}

static inline int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
				     struct cqhci_host *cq_host)
{
	return 0;
}

static inline void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
{
}

static inline int __maybe_unused
sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
{
	return 0;
}
#endif /* !CONFIG_MMC_CRYPTO */
2033
2034
2035
2036
2037
2038
2039
2040 static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
2041 {
2042 int cmd_error = 0;
2043 int data_error = 0;
2044
2045 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
2046 return intmask;
2047
2048 cqhci_irq(host->mmc, intmask, cmd_error, data_error);
2049 return 0;
2050 }
2051
/* Enable the CQE and (re)apply the ICE configuration it requires */
static void sdhci_msm_cqe_enable(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_cqe_enable(mmc);
	sdhci_msm_ice_enable(msm_host);
}
2061
/*
 * Disable the CQE and return the host to legacy SDHCI operation.
 * Restores the legacy descriptor size and clears the latched command
 * complete interrupt before handing over to cqhci.
 */
static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 ctrl;

	/*
	 * In 64-bit DMA mode the legacy SDHCI path uses 16-byte ADMA
	 * descriptors; restore that size now that CQE is going away
	 * (sdhci_msm_cqe_add_host() set it to 12 while CQE was active).
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 16;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * During CQE transfers the command complete bit gets latched, so
	 * clear that interrupt status here (and make sure it is enabled)
	 * to avoid a spurious legacy SDHCI interrupt once CQE is halted
	 * or disabled.
	 */
	ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
	ctrl |= SDHCI_INT_RESPONSE;
	sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);

	spin_unlock_irqrestore(&host->lock, flags);

	sdhci_cqe_disable(mmc, recovery);
}
2092
/*
 * Compute the data timeout, then cap the software timeout at 22 s when
 * the hardware timeout - 2^(count+15) clock cycles - would exceed
 * 10 s at the current clock (applies to data commands with 400 kHz <
 * clock <= 50 MHz).
 */
static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u32 count, start = 15;

	__sdhci_set_timeout(host, cmd);
	count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL);

	/*
	 * (1 << (count + 15)) cycles at host->clock Hz is longer than
	 * 10 s exactly when (1 << (count + 15)) > 10 * host->clock.
	 */
	if (cmd && cmd->data && host->clock > 400000 &&
	    host->clock <= 50000000 &&
	    ((1 << (count + start)) > (10 * host->clock)))
		host->data_timeout = 22LL * NSEC_PER_SEC;
}
2109
/* CQHCI callbacks for the MSM variant */
static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
	.enable		= sdhci_msm_cqe_enable,
	.disable	= sdhci_msm_cqe_disable,
#ifdef CONFIG_MMC_CRYPTO
	.program_key	= sdhci_msm_program_key,
#endif
};
2117
/*
 * Set up the host with CQE (and optionally ICE) support and register it
 * with the MMC core.  On any failure after sdhci_setup_host() the host
 * is cleaned up again.
 */
static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
				  struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host;
	bool dma64;
	u32 cqcfg;
	int ret;

	/*
	 * Allocate the ADMA table for 16-byte descriptors: that is the
	 * size the legacy path uses in 64-bit mode (see
	 * sdhci_msm_cqe_disable()), so the table must be big enough.
	 */
	if (host->caps & SDHCI_CAN_64BIT)
		host->alloc_desc_sz = 16;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	cq_host = cqhci_pltfm_init(pdev);
	if (IS_ERR(cq_host)) {
		ret = PTR_ERR(cq_host);
		dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
		goto cleanup;
	}

	msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
	cq_host->ops = &sdhci_msm_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;

	ret = sdhci_msm_ice_init(msm_host, cq_host);
	if (ret)
		goto cleanup;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret) {
		dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
				mmc_hostname(host->mmc), ret);
		goto cleanup;
	}

	/* Keep the CQE from being reset when it is (re)enabled */
	cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
	cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
	cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);

	/*
	 * Use 12-byte ADMA descriptors until CQE is enabled, so data
	 * commands issued during card init (before sdhci_msm_cqe_enable())
	 * execute correctly.  NOTE(review): the 12-vs-16 byte split mirrors
	 * sdhci_msm_cqe_disable() - confirm against the controller's ADMA
	 * descriptor documentation.
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 12;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	dev_info(&pdev->dev, "%s: CQE init: success\n",
			mmc_hostname(host->mmc));
	return ret;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}
2188
2189
2190
2191
2192
2193
2194
2195
/*
 * Inspect a register write before it lands: decide whether it will
 * trigger a power IRQ (bus power or IO level change) so the caller can
 * wait for it afterwards, and track transfer mode / tuning commands to
 * drive the CDR enable state.
 *
 * Returns the REQ_* bits the caller should wait for, or 0.
 */
static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 req_type = 0;

	switch (reg) {
	case SDHCI_HOST_CONTROL2:
		/* 1.8V enable bit changes raise an IO level power IRQ */
		req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
			REQ_IO_HIGH;
		break;
	case SDHCI_SOFTWARE_RESET:
		/* A full reset of a powered host turns the bus off */
		if (host->pwr && (val & SDHCI_RESET_ALL))
			req_type = REQ_BUS_OFF;
		break;
	case SDHCI_POWER_CONTROL:
		req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
		break;
	case SDHCI_TRANSFER_MODE:
		/* Remember the mode so the next COMMAND write can use it */
		msm_host->transfer_mode = val;
		break;
	case SDHCI_COMMAND:
		if (!msm_host->use_cdr)
			break;
		/* CDR is enabled only for data reads that are not tuning */
		if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
			sdhci_msm_set_cdr(host, true);
		else
			sdhci_msm_set_cdr(host, false);
		break;
	}

	if (req_type) {
		msm_host->pwr_irq_flag = 0;
		/*
		 * Since this register write may trigger a power IRQ, make
		 * sure all earlier register writes have completed first.
		 */
		mb();
	}
	return req_type;
}
2239
2240
2241 static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
2242 {
2243 u32 req_type = 0;
2244
2245 req_type = __sdhci_msm_check_write(host, val, reg);
2246 writew_relaxed(val, host->ioaddr + reg);
2247
2248 if (req_type)
2249 sdhci_msm_check_power_status(host, req_type);
2250 }
2251
2252
2253 static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
2254 {
2255 u32 req_type = 0;
2256
2257 req_type = __sdhci_msm_check_write(host, val, reg);
2258
2259 writeb_relaxed(val, host->ioaddr + reg);
2260
2261 if (req_type)
2262 sdhci_msm_check_power_status(host, req_type);
2263 }
2264
/*
 * Probe the vqmmc regulator's supported voltage ranges (1.8 V / 3 V),
 * cache the results in msm_host->caps_0, and - when anything is
 * supported - enable SW control of the IO pad power switch and set its
 * initial state to match the current IO level.
 */
static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
{
	struct mmc_host *mmc = msm_host->mmc;
	struct regulator *supply = mmc->supply.vqmmc;
	u32 caps = 0, config;
	struct sdhci_host *host = mmc_priv(mmc);
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		if (regulator_is_supported_voltage(supply, 1700000, 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (regulator_is_supported_voltage(supply, 2700000, 3600000))
			caps |= CORE_3_0V_SUPPORT;

		if (!caps)
			pr_warn("%s: 1.8/3V not supported for vqmmc\n",
					mmc_hostname(mmc));
	}

	if (caps) {
		/*
		 * Enable SW control of the IO pad power switch so the
		 * CORE_IO_PAD_PWR_SWITCH bit can be driven as required.
		 */
		u32 io_level = msm_host->curr_io_level;

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_IO_PAD_PWR_SWITCH_EN;

		/* Same pad switch policy as sdhci_msm_handle_pwr_irq() */
		if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT))
			config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT))
			config |= CORE_IO_PAD_PWR_SWITCH;

		writel_relaxed(config,
				host->ioaddr + msm_offset->core_vendor_spec);
	}
	msm_host->caps_0 |= caps;
	pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
}
2306
2307 static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
2308 {
2309 if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL))
2310 cqhci_deactivate(host->mmc);
2311 sdhci_reset(host, mask);
2312 }
2313
2314 static int sdhci_msm_register_vreg(struct sdhci_msm_host *msm_host)
2315 {
2316 int ret;
2317
2318 ret = mmc_regulator_get_supply(msm_host->mmc);
2319 if (ret)
2320 return ret;
2321
2322 sdhci_msm_set_regulator_caps(msm_host);
2323
2324 return 0;
2325 }
2326
2327 static int sdhci_msm_start_signal_voltage_switch(struct mmc_host *mmc,
2328 struct mmc_ios *ios)
2329 {
2330 struct sdhci_host *host = mmc_priv(mmc);
2331 u16 ctrl, status;
2332
2333
2334
2335
2336
2337 if (host->version < SDHCI_SPEC_300)
2338 return 0;
2339
2340 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2341
2342 switch (ios->signal_voltage) {
2343 case MMC_SIGNAL_VOLTAGE_330:
2344 if (!(host->flags & SDHCI_SIGNALING_330))
2345 return -EINVAL;
2346
2347
2348 ctrl &= ~SDHCI_CTRL_VDD_180;
2349 break;
2350 case MMC_SIGNAL_VOLTAGE_180:
2351 if (!(host->flags & SDHCI_SIGNALING_180))
2352 return -EINVAL;
2353
2354
2355 ctrl |= SDHCI_CTRL_VDD_180;
2356 break;
2357
2358 default:
2359 return -EINVAL;
2360 }
2361
2362 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2363
2364
2365 usleep_range(5000, 5500);
2366
2367
2368 status = ctrl & SDHCI_CTRL_VDD_180;
2369 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2370 if ((ctrl & SDHCI_CTRL_VDD_180) == status)
2371 return 0;
2372
2373 dev_warn(mmc_dev(mmc), "%s: Regulator output did not became stable\n",
2374 mmc_hostname(mmc));
2375
2376 return -EAGAIN;
2377 }
2378
#define DRIVER_NAME "sdhci_msm"
/* Error-level dump helper prefixed with the host name and driver name */
#define SDHCI_MSM_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
2382
/* Dump the vendor-specific DLL and function registers for debugging */
static void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	SDHCI_MSM_DUMP("----------- VENDOR REGISTER DUMP -----------\n");

	SDHCI_MSM_DUMP(
			"DLL sts: 0x%08x | DLL cfg: 0x%08x | DLL cfg2: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_dll_status),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2));
	SDHCI_MSM_DUMP(
			"DLL cfg3: 0x%08x | DLL usr ctl: 0x%08x | DDR cfg: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config_3),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_usr_ctl),
		readl_relaxed(host->ioaddr + msm_offset->core_ddr_config));
	SDHCI_MSM_DUMP(
			"Vndr func: 0x%08x | Vndr func2 : 0x%08x Vndr func3: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec),
		readl_relaxed(host->ioaddr +
			msm_offset->core_vendor_spec_func2),
		readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3));
}
2408
/*
 * Register accessors for legacy controllers that still expose the MCI
 * register block (mci_removed == false in the matching variant info).
 */
static const struct sdhci_msm_variant_ops mci_var_ops = {
	.msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
};
2413
/* Register accessors for v5+ controllers (MCI register block removed). */
static const struct sdhci_msm_variant_ops v5_var_ops = {
	.msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed,
};
2418
/* Variant data for pre-v5 controllers: MCI block present, MCI offsets. */
static const struct sdhci_msm_variant_info sdhci_msm_mci_var = {
	.var_ops = &mci_var_ops,
	.offset = &sdhci_msm_mci_offset,
};
2423
/* Variant data for v5 controllers: no MCI block, v5 register offsets. */
static const struct sdhci_msm_variant_info sdhci_msm_v5_var = {
	.mci_removed = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};
2429
/*
 * SDM845-class variant: v5 layout, and additionally requires the SDR DLL
 * configuration to be restored on runtime resume (restore_dll_config).
 */
static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
	.mci_removed = true,
	.restore_dll_config = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};
2436
/*
 * Device-tree match table.  Each entry's .data points at the variant info
 * that probe retrieves via of_device_get_match_data() to select register
 * offsets and accessors for the controller revision.
 */
static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
	{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
	{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
	{.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
	{},
};
2448
2449 MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
2450
/* SDHCI core callbacks shared by all supported MSM controller variants. */
static const struct sdhci_ops sdhci_msm_ops = {
	.reset = sdhci_msm_reset,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.set_bus_width = sdhci_set_bus_width,
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.write_w = sdhci_msm_writew,
	.write_b = sdhci_msm_writeb,
	.irq = sdhci_msm_cqe_irq,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
	.set_power = sdhci_set_power_noreg,
	.set_timeout = sdhci_msm_set_timeout,
};
2465
/*
 * Platform data: quirks working around controller limitations (broken card
 * detect, broken clock-base capability, etc.) plus the MSM ops table.
 */
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
		  SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,

	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &sdhci_msm_ops,
};
2475
2476 static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
2477 struct sdhci_host *host)
2478 {
2479 struct device_node *node = pdev->dev.of_node;
2480 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2481 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
2482
2483 if (of_property_read_u32(node, "qcom,ddr-config",
2484 &msm_host->ddr_config))
2485 msm_host->ddr_config = DDR_CONFIG_POR_VAL;
2486
2487 of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
2488 }
2489
2490 static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)
2491 {
2492 struct reset_control *reset;
2493 int ret = 0;
2494
2495 reset = reset_control_get_optional_exclusive(dev, NULL);
2496 if (IS_ERR(reset))
2497 return dev_err_probe(dev, PTR_ERR(reset),
2498 "unable to acquire core_reset\n");
2499
2500 if (!reset)
2501 return ret;
2502
2503 ret = reset_control_assert(reset);
2504 if (ret) {
2505 reset_control_put(reset);
2506 return dev_err_probe(dev, ret, "core_reset assert failed\n");
2507 }
2508
2509
2510
2511
2512
2513
2514 usleep_range(200, 210);
2515
2516 ret = reset_control_deassert(reset);
2517 if (ret) {
2518 reset_control_put(reset);
2519 return dev_err_probe(dev, ret, "core_reset deassert failed\n");
2520 }
2521
2522 usleep_range(200, 210);
2523 reset_control_put(reset);
2524
2525 return ret;
2526 }
2527
/*
 * Probe the MSM SDHCI controller: allocate the platform host, select the
 * variant register layout from the of_match data, bring up clocks/OPP,
 * program the vendor HC mode and capability overrides, wire up the power
 * IRQ, enable runtime PM and finally register the (optionally CQE-capable)
 * SDHCI host.  Hardware bring-up order below is significant.
 */
static int sdhci_msm_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_msm_host *msm_host;
	struct clk *clk;
	int ret;
	u16 host_version, core_minor;
	u32 core_version, config;
	u8 core_major;
	const struct sdhci_msm_offset *msm_offset;
	const struct sdhci_msm_variant_info *var_info;
	struct device_node *node = pdev->dev.of_node;

	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
	if (IS_ERR(host))
		return PTR_ERR(host);

	/* 0 selects the smallest SDMA boundary — TODO confirm vs sdhci core */
	host->sdma_boundary = 0;
	pltfm_host = sdhci_priv(host);
	msm_host = sdhci_pltfm_priv(pltfm_host);
	msm_host->mmc = host->mmc;
	msm_host->pdev = pdev;

	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto pltfm_free;

	/*
	 * Pick the per-variant register offsets and accessors from the
	 * of_match data (see sdhci_msm_dt_match).
	 */
	var_info = of_device_get_match_data(&pdev->dev);

	msm_host->mci_removed = var_info->mci_removed;
	msm_host->restore_dll_config = var_info->restore_dll_config;
	msm_host->var_ops = var_info->var_ops;
	msm_host->offset = var_info->offset;

	msm_offset = msm_host->offset;

	sdhci_get_of_property(pdev);
	sdhci_msm_get_of_property(pdev, host);

	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;

	/* Pulse the optional GCC core reset before touching the block. */
	ret = sdhci_msm_gcc_reset(&pdev->dev, host);
	if (ret)
		goto pltfm_free;

	/* Optional bus (voter) clock; absence is tolerated. */
	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(msm_host->bus_clk)) {
		/* Vote for the maximum rate — clamped by the clock framework. */
		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
		if (ret)
			goto pltfm_free;
		ret = clk_prepare_enable(msm_host->bus_clk);
		if (ret)
			goto pltfm_free;
	}

	/* Mandatory peripheral interface clock. */
	clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[1].clk = clk;

	/* Mandatory SDC core clock. */
	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[0].clk = clk;

	/* Interconnect paths and OPP table for core-clock scaling. */
	ret = dev_pm_opp_of_find_icc_paths(&pdev->dev, NULL);
	if (ret)
		goto bus_clk_disable;

	ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
	if (ret)
		goto bus_clk_disable;

	/* An OPP table is optional (-ENODEV); anything else is fatal. */
	ret = devm_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(&pdev->dev, "Invalid OPP table in Device tree\n");
		goto bus_clk_disable;
	}

	/* Best-effort boost of the core clock for probe; warn on failure. */
	ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX);
	if (ret)
		dev_warn(&pdev->dev, "core clock boost failed\n");

	/* Remaining bulk clocks are all optional. */
	clk = devm_clk_get(&pdev->dev, "cal");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[2].clk = clk;

	clk = devm_clk_get(&pdev->dev, "sleep");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[3].clk = clk;

	clk = sdhci_msm_ice_get_clk(&pdev->dev);
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[4].clk = clk;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		goto bus_clk_disable;

	/*
	 * Optional TCXO clock; its absence is only warned about —
	 * presumably some features depending on it are then unavailable.
	 */
	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(msm_host->xo_clk)) {
		ret = PTR_ERR(msm_host->xo_clk);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}

	/* Pre-v5 parts expose a second (MCI core) register region. */
	if (!msm_host->mci_removed) {
		msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(msm_host->core_mem)) {
			ret = PTR_ERR(msm_host->core_mem);
			goto clk_disable;
		}
	}

	/* Restore the vendor-spec register to its power-on-reset value. */
	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
			host->ioaddr + msm_offset->core_vendor_spec);

	if (!msm_host->mci_removed) {
		/* Select SDHC (host-controller) mode and keep FF clock SW
		 * reset disabled in the HC mode register. */
		msm_host_writel(msm_host, HC_MODE_EN, host,
				msm_offset->core_hc_mode);
		config = msm_host_readl(msm_host, host,
				msm_offset->core_hc_mode);
		config |= FF_CLK_SW_RST_DIS;
		msm_host_writel(msm_host, config, host,
				msm_offset->core_hc_mode);
	}

	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
			       SDHCI_VENDOR_VER_SHIFT));

	core_version = msm_host_readl(msm_host, host,
			msm_offset->core_mci_version);
	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
		      CORE_VERSION_MAJOR_SHIFT;
	core_minor = core_version & CORE_VERSION_MINOR_MASK;
	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
		core_version, core_major, core_minor);

	/* Revision-dependent feature selection. */
	if (core_major == 1 && core_minor >= 0x42)
		msm_host->use_14lpp_dll_reset = true;

	/* Older cores (< 1.0x34) use the CDC LP 533 DLL calibration path. */
	if (core_major == 1 && core_minor < 0x34)
		msm_host->use_cdclp533 = true;

	/*
	 * Except for the listed minor revisions, advertise 3.0V and 8-bit
	 * support via the vendor capabilities0 override register, since the
	 * standard capability register does not report them on these cores.
	 */
	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
		config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
		config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec_capabilities0);
	}

	if (core_major == 1 && core_minor >= 0x49)
		msm_host->updated_ddr_cfg = true;

	if (core_major == 1 && core_minor >= 0x71)
		msm_host->uses_tassadar_dll = true;

	ret = sdhci_msm_register_vreg(msm_host);
	if (ret)
		goto clk_disable;

	/*
	 * Acknowledge any power IRQ status left pending from the power-on
	 * reset state before the interrupt is requested below, so the
	 * handler does not fire spuriously on a stale status.
	 */
	sdhci_msm_handle_pwr_irq(host, 0);

	/* Ensure the writes above are posted before enabling the IRQ. */
	mb();

	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
	if (msm_host->pwr_irq < 0) {
		ret = msm_host->pwr_irq;
		goto clk_disable;
	}

	sdhci_msm_init_pwr_irq_wait(msm_host);

	/* Unmask all power-control request bits. */
	msm_host_writel(msm_host, INT_MASK, host,
		msm_offset->core_pwrctl_mask);

	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
					sdhci_msm_pwr_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
		goto clk_disable;
	}

	msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;

	/* Use the maximum data timeout counter value. */
	host->max_timeout_count = 0xF;

	/* Enable runtime PM with autosuspend; hold a ref during add_host. */
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	host->mmc_host_ops.start_signal_voltage_switch =
		sdhci_msm_start_signal_voltage_switch;
	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
	if (of_property_read_bool(node, "supports-cqe"))
		ret = sdhci_msm_cqe_add_host(host, pdev);
	else
		ret = sdhci_add_host(host);
	if (ret)
		goto pm_runtime_disable;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

pm_runtime_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
clk_disable:
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
bus_clk_disable:
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
	sdhci_pltfm_free(pdev);
	return ret;
}
2802
2803 static int sdhci_msm_remove(struct platform_device *pdev)
2804 {
2805 struct sdhci_host *host = platform_get_drvdata(pdev);
2806 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2807 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
2808 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
2809 0xffffffff);
2810
2811 sdhci_remove_host(host, dead);
2812
2813 pm_runtime_get_sync(&pdev->dev);
2814 pm_runtime_disable(&pdev->dev);
2815 pm_runtime_put_noidle(&pdev->dev);
2816
2817 clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
2818 msm_host->bulk_clks);
2819 if (!IS_ERR(msm_host->bus_clk))
2820 clk_disable_unprepare(msm_host->bus_clk);
2821 sdhci_pltfm_free(pdev);
2822 return 0;
2823 }
2824
/* Runtime suspend: drop the OPP vote, then gate all bulk clocks. */
static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	/* Drop the performance vote before the clocks go away. */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);

	return 0;
}
2838
2839 static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
2840 {
2841 struct sdhci_host *host = dev_get_drvdata(dev);
2842 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2843 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
2844 int ret;
2845
2846 ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
2847 msm_host->bulk_clks);
2848 if (ret)
2849 return ret;
2850
2851
2852
2853
2854 if (msm_host->restore_dll_config && msm_host->clk_rate) {
2855 ret = sdhci_msm_restore_sdr_dll_config(host);
2856 if (ret)
2857 return ret;
2858 }
2859
2860 dev_pm_opp_set_rate(dev, msm_host->clk_rate);
2861
2862 return sdhci_msm_ice_resume(msm_host);
2863 }
2864
/*
 * PM ops: system sleep is routed through the runtime-PM callbacks via
 * pm_runtime_force_suspend/resume.
 */
static const struct dev_pm_ops sdhci_msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
			   sdhci_msm_runtime_resume,
			   NULL)
};
2872
/* Platform driver glue; probing may proceed asynchronously. */
static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		   .name = "sdhci_msm",
		   .of_match_table = sdhci_msm_dt_match,
		   .pm = &sdhci_msm_pm_ops,
		   .probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
2883
2884 module_platform_driver(sdhci_msm_driver);
2885
2886 MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
2887 MODULE_LICENSE("GPL v2");