// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sched/clock.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-mediatek.h"

#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"

static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
	{ .wmanufacturerid = UFS_ANY_VENDOR,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "H9HQ21AFAMZDAR",
	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
	{}
};

static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};

static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}

static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}

static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}

static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
}

static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
	u32 tmp;

	if (enable) {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp |
		      (1 << RX_SYMBOL_CLK_GATE_EN) |
		      (1 << SYS_CLK_GATE_EN) |
		      (1 << TX_CLK_GATE_EN);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	} else {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
			      (1 << SYS_CLK_GATE_EN) |
			      (1 << TX_CLK_GATE_EN));
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	}
}

static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_crypto_ctrl(res, 1);
	if (res.a0) {
		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
			 __func__, res.a0);
		hba->caps &= ~UFSHCD_CAP_CRYPTO;
	}
}

static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}

static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
				       struct reset_control **rc,
				       char *str)
{
	*rc = devm_reset_control_get(hba->dev, str);
	if (IS_ERR(*rc)) {
		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
			 str, PTR_ERR(*rc));
		*rc = NULL;
	}
}

static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_init_reset_control(hba, &host->hci_reset,
				   "hci_rst");
	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
				   "unipro_rst");
	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
				   "crypto_rst");
}

static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}

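		/*
		 * Turn on CLK_CG early to bypass abnormal ERR_CHK signal
		 * to prevent host hang issue
		 */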
		ufshcd_writel(hba,
			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
			      REG_UFS_XOUFS_CTRL);
	}

	return 0;
}

static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
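		/*
		 * UFS driver might be probed before the phy driver does.
		 * In that case we would like to return EPROBE_DEFER code.
		 */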
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			 __func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}

	if (err)
		host->mphy = NULL;
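	/*
	 * Allow unbound mphy because not every platform needs specific
	 * mphy control.
	 */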
	if (err == -ENODEV)
		err = 0;

	return err;
}

static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	if (host->ref_clk_enabled == on)
		return 0;

	ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);

	if (on) {
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until ack bit equals to req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (on)
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);

	ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);

	return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
					  u16 gating_us)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (hba->dev_info.clk_gating_wait_us) {
		host->ref_clk_gating_wait_us =
			hba->dev_info.clk_gating_wait_us;
	} else {
		host->ref_clk_gating_wait_us = gating_us;
	}

	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
}

static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
	} else {
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	}
}

static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
				    unsigned long retry_ms)
{
	u64 timeout, time_checked;
	u32 val, sm;
	bool wait_idle;

	/* Cannot use plain ktime_get() in suspend flow */
	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;

	/* Wait a specific time after checking the base state */
	udelay(10);
	wait_idle = false;

	do {
		time_checked = ktime_get_mono_fast_ns();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);

		sm = val & 0x1f;

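		/*
		 * If the state is in H8 enter or H8 enter confirm,
		 * wait until the link returns to the idle state.
		 */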
		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
			wait_idle = true;
			udelay(50);
			continue;
		} else if (!wait_idle)
			break;

		if (wait_idle && (sm == VS_HCE_BASE))
			break;
	} while (time_checked < timeout);

	if (wait_idle && sm != VS_HCE_BASE)
		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}

static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
				   unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for max. 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	if (val == state)
		return 0;

	return -ETIMEDOUT;
}

static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* Wait 200 us to stabilize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
			if (ret < 0)
				goto out;
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}

static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		err = PTR_ERR(clk);
	else
		*clk_out = clk;

	return err;
}

static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}

static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
				 struct clk **clk)
{
	int ret;

	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
	if (ret) {
		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
			 name, ret);
	}

	return ret;
}

static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	return;
}

static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	host->reg_va09 = regulator_get(hba->dev, "va09");
	if (IS_ERR(host->reg_va09))
		dev_info(hba->dev, "failed to get va09");
	else
		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}

static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device_node *np = hba->dev->of_node;

	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
		ufs_mtk_init_boost_crypt(hba);

	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
		ufs_mtk_init_va09_pwr_ctrl(hba);

	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
		host->caps |= UFS_MTK_CAP_DISABLE_AH8;

	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
		host->caps |= UFS_MTK_CAP_BROKEN_VCC;

	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;

	dev_info(hba->dev, "caps: 0x%x", host->caps);
}

static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (!host || !host->pm_qos_init)
		return;

	cpu_latency_qos_update_request(&host->pm_qos_req,
				       boost ? 0 : PM_QOS_DEFAULT_VALUE);
}

static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (on) {
		phy_power_on(host->mphy);
		ufs_mtk_setup_ref_clk(hba, on);
		ufs_mtk_boost_crypt(hba, on);
		ufs_mtk_boost_pm_qos(hba, on);
	} else {
		ufs_mtk_boost_pm_qos(hba, on);
		ufs_mtk_boost_crypt(hba, on);
		ufs_mtk_setup_ref_clk(hba, on);
		phy_power_off(host->mphy);
	}
}

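/**
 * ufs_mtk_setup_clocks - enables/disables clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */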
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

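	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */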
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			   (!ufshcd_can_hibern8_during_gating(hba) &&
			    ufshcd_is_auto_hibern8_enabled(hba))) {
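			/*
			 * Gate ref-clk and poweroff mphy if link state is in
			 * OFF or Hibern8 by either Auto-Hibern8 or
			 * ufshcd_link_state_transition().
			 */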
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off)
			ufs_mtk_pwr_ctrl(hba, false);
	} else if (on && status == POST_CHANGE) {
		ufs_mtk_pwr_ctrl(hba, true);
	}

	return ret;
}

static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret, ver = 0;

	if (host->hw_ver.major)
		return;

	/* Set default (minimum) version anyway */
	host->hw_ver.major = 2;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
	if (!ret) {
		if (ver >= UFS_UNIPRO_VER_1_8) {
			host->hw_ver.major = 3;
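			/*
			 * Fix HCI version for some platforms with
			 * incorrect version
			 */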
			if (hba->ufs_version < ufshci_version(3, 0))
				hba->ufs_version = ufshci_version(3, 0);
		}
	}
}

static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
	return hba->ufs_version;
}

#define MAX_VCC_NAME 30
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct device_node *np = hba->dev->of_node;
	struct device *dev = hba->dev;
	char vcc_name[MAX_VCC_NAME];
	struct arm_smccc_res res;
	int err, ver;

	if (hba->vreg_info.vcc)
		return 0;

	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
		ufs_mtk_get_vcc_num(res);
		if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
			snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
		else
			return -ENODEV;
	} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
		ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
		snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
	} else {
		return 0;
	}

	err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
	if (err)
		return err;

	err = ufshcd_get_vreg(dev, info->vcc);
	if (err)
		return err;

	err = regulator_enable(info->vcc->reg);
	if (!err) {
		info->vcc->enabled = true;
		dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
	}

	return err;
}

static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct ufs_vreg **vreg_on, **vreg_off;

	if (hba->dev_info.wspecversion >= 0x0300) {
		vreg_on = &info->vccq;
		vreg_off = &info->vccq2;
	} else {
		vreg_on = &info->vccq2;
		vreg_off = &info->vccq;
	}

	if (*vreg_on)
		(*vreg_on)->always_on = true;

	if (*vreg_off) {
		regulator_disable((*vreg_off)->reg);
		devm_kfree(hba->dev, (*vreg_off)->name);
		devm_kfree(hba->dev, *vreg_off);
		*vreg_off = NULL;
	}
}

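/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */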
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;
	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

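	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
	 * phy clock setup is skipped.
	 *
	 * Enable phy clocks specifically here.
	 */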
	ufs_mtk_mphy_power_on(hba, true);
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	if (!ufs_mtk_is_pmc_via_fastauto(hba))
		return false;

	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
		return false;

	if (dev_req_params->pwr_tx != FAST_MODE &&
	    dev_req_params->gear_tx < UFS_HS_G4)
		return false;

	if (dev_req_params->pwr_rx != FAST_MODE &&
	    dev_req_params->gear_rx < UFS_HS_G4)
		return false;

	return true;
}

static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params host_cap;
	int ret;

	ufshcd_init_pwr_dev_param(&host_cap);
	host_cap.hs_rx_gear = UFS_HS_G5;
	host_cap.hs_tx_gear = UFS_HS_G5;

	ret = ufshcd_get_pwr_dev_param(&host_cap,
				       dev_max_params,
				       dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			       dev_req_params->lane_tx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			       dev_req_params->lane_rx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       dev_req_params->hs_rate);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			       PA_NO_ADAPT);

		ret = ufshcd_uic_change_pwr_mode(hba,
						 FASTAUTO_MODE << 4 | FASTAUTO_MODE);

		if (ret) {
			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
				__func__, ret);
		}
	}

	if (host->hw_ver.major >= 3) {
		ret = ufshcd_dme_configure_adapt(hba,
						 dev_req_params->gear_tx,
						 PA_INITIAL_ADAPT);
	}

	return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status stage,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
					     dev_req_params);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
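		/*
		 * Forcibly set as non-LPM mode if the UIC command failed,
		 * so that the default hba_enable_delay_us value is used
		 * when re-enabling the host.
		 */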
		host->unipro_lpm = lpm;
	}

	return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

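	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */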
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* Disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}

static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
	u32 ah_ms;

	if (ufshcd_is_clkgating_allowed(hba)) {
		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
					  hba->ahit);
		else
			ah_ms = 10;
		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
	}
}

static int ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* Enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* Will be configured during probe hba */
	if (ufshcd_is_auto_hibern8_supported(hba))
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufs_mtk_setup_clk_gating(hba);

	return 0;
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status stage)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_link(hba);
		break;
	case POST_CHANGE:
		ret = ufs_mtk_post_link(hba);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* Disable hba before device reset */
	ufshcd_hba_stop(hba);

	ufs_mtk_device_reset_ctrl(0, res);

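	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on safe side, keep the reset low for at least 10us.
	 */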
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}

static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_lpm(hba, false);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (!err)
		ufshcd_set_link_active(hba);
	else
		return err;

	err = ufshcd_make_hba_operational(hba);
	if (err)
		return err;

	return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
	int err;

	/* Disable reset confirm feature by UniPro */
	ufshcd_writel(hba,
		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
		      REG_UFS_XOUFS_CTRL);

	err = ufs_mtk_unipro_set_lpm(hba, true);
	if (err) {
		/* Resume UniPro state for following error recovery */
		ufs_mtk_unipro_set_lpm(hba, false);
		return err;
	}

	return 0;
}

static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct ufs_vreg *vccqx = NULL;

	if (hba->vreg_info.vccq)
		vccqx = hba->vreg_info.vccq;
	else
		vccqx = hba->vreg_info.vccq2;

	regulator_set_mode(vccqx->reg,
			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
}

static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct arm_smccc_res res;

	ufs_mtk_device_pwr_ctrl(!lpm,
				(unsigned long)hba->dev_info.wspecversion,
				res);
}

static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
		return;

	/* Skip if VCC is assumed always-on */
	if (!hba->vreg_info.vcc)
		return;

	/* Bypass LPM when device is still active */
	if (lpm && ufshcd_is_ufs_dev_active(hba))
		return;

	/* Bypass LPM if VCC is enabled */
	if (lpm && hba->vreg_info.vcc->enabled)
		return;

	if (lpm) {
		ufs_mtk_vccqx_set_lpm(hba, lpm);
		ufs_mtk_vsx_set_lpm(hba, lpm);
	} else {
		ufs_mtk_vsx_set_lpm(hba, lpm);
		ufs_mtk_vccqx_set_lpm(hba, lpm);
	}
}

static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
	int ret;

	/* Disable auto-hibern8 */
	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);

	/* Wait for host to return to idle state when auto-hibern8 is off */
	ufs_mtk_wait_idle_state(hba, 5);

	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
	if (ret)
		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
			   enum ufs_notify_change_status status)
{
	int err;
	struct arm_smccc_res res;

	if (status == PRE_CHANGE) {
		if (!ufshcd_is_auto_hibern8_supported(hba))
			return 0;
		ufs_mtk_auto_hibern8_disable(hba);
		return 0;
	}

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
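		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is
		 * still in low-power mode.
		 */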
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);

	return 0;
fail:
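	/*
	 * Set link as off state enforcedly to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
	 * for completed host reset.
	 */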
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;
	struct arm_smccc_res res;

	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
		ufs_mtk_dev_vreg_set_lpm(hba, false);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);

	err = ufs_mtk_mphy_power_on(hba, true);
	if (err)
		goto fail;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_hpm(hba);
		if (err)
			goto fail;
	}

	return 0;
fail:
	return ufshcd_link_recovery(hba);
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufs_mtk_dbg_sel(hba);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}

static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u16 mid = dev_info->wmanufacturerid;

	if (mid == UFS_VENDOR_SAMSUNG) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
	}

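	/*
	 * Decide waiting time before gating reference clock and
	 * after ungating reference clock according to vendors'
	 * requirements.
	 */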
	if (mid == UFS_VENDOR_SAMSUNG)
		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
	else if (mid == UFS_VENDOR_SKHYNIX)
		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
	else if (mid == UFS_VENDOR_TOSHIBA)
		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
	else
		ufs_mtk_setup_ref_clk_wait_us(hba,
					      REFCLK_DEFAULT_WAIT_US);
	return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
		hba->vreg_info.vcc->always_on = true;
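		/*
		 * VCC will be kept always-on thus we don't
		 * need any delay during regulator operations
		 */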
		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
				     UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
	}

	ufs_mtk_vreg_fix_vcc(hba);
	ufs_mtk_vreg_fix_vccqx(hba);
}

static void ufs_mtk_event_notify(struct ufs_hba *hba,
				 enum ufs_event_type evt, void *data)
{
	unsigned int val = *(u32 *)data;

	trace_ufs_mtk_event(evt, val);
}

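/*
 * struct ufs_hba_mtk_vops - UFS MediaTek specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */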
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name = "mediatek.ufshci",
	.init = ufs_mtk_init,
	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
	.setup_clocks = ufs_mtk_setup_clocks,
	.hce_enable_notify = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks = ufs_mtk_fixup_dev_quirks,
	.suspend = ufs_mtk_suspend,
	.resume = ufs_mtk_resume,
	.dbg_register_dump = ufs_mtk_dbg_register_dump,
	.device_reset = ufs_mtk_device_reset,
	.event_notify = ufs_mtk_event_notify,
};

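/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */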
static int ufs_mtk_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;
	struct device_node *reset_node;
	struct platform_device *reset_pdev;
	struct device_link *link;

	reset_node = of_find_compatible_node(NULL, NULL,
					     "ti,syscon-reset");
	if (!reset_node) {
		dev_notice(dev, "find ti,syscon-reset fail\n");
		goto skip_reset;
	}
	reset_pdev = of_find_device_by_node(reset_node);
	if (!reset_pdev) {
		dev_notice(dev, "find reset_pdev fail\n");
		goto skip_reset;
	}
	link = device_link_add(dev, &reset_pdev->dev,
			       DL_FLAG_AUTOPROBE_CONSUMER);
	put_device(&reset_pdev->dev);
	if (!link) {
		dev_notice(dev, "add reset device_link fail\n");
		goto skip_reset;
	}
	/* Supplier is not probed yet */
	if (link->status == DL_STATE_DORMANT) {
		err = -EPROBE_DEFER;
		goto out;
	}

skip_reset:
	/* Perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);

out:
	if (err)
		dev_info(dev, "probe failed %d\n", err);

	of_node_put(reset_node);
	return err;
}

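/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always return 0
 */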
static int ufs_mtk_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ufs_mtk_system_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	ret = ufshcd_system_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_system_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_system_resume(dev);
}
#endif

static int ufs_mtk_runtime_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret = 0;

	ret = ufshcd_runtime_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_runtime_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_runtime_resume(dev);
}

static const struct dev_pm_ops ufs_mtk_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
				ufs_mtk_system_resume)
	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
			   ufs_mtk_runtime_resume, NULL)
	.prepare = ufshcd_suspend_prepare,
	.complete = ufshcd_resume_complete,
};

static struct platform_driver ufs_mtk_pltform = {
	.probe = ufs_mtk_probe,
	.remove = ufs_mtk_remove,
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver = {
		.name = "ufshcd-mtk",
		.pm = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);