// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Combo-PHY driver
 *
 * Copyright (C) 2019-2020 Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include <dt-bindings/phy/phy.h>

#define PCIE_PHY_GEN_CTRL   0x00
#define PCIE_PHY_CLK_PAD    BIT(17)

#define PAD_DIS_CFG     0x174

#define PCS_XF_ATE_OVRD_IN_2    0x3008
#define ADAPT_REQ_MSK       GENMASK(5, 4)

#define PCS_XF_RX_ADAPT_ACK 0x3010
#define RX_ADAPT_ACK_BIT    BIT(0)

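/*
 * The PCS/cross registers are laid out per lane: each lane occupies a
 * 0x100 register window, and the register index is shifted left by two
 * to form the byte offset used for MMIO access.
 */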
#define CR_ADDR(addr, lane) (((addr) + (lane) * 0x100) << 2)
#define REG_COMBO_MODE(x)   ((x) * 0x200)
#define REG_CLK_DISABLE(x)  ((x) * 0x200 + 0x124)

#define COMBO_PHY_ID(x)     ((x)->parent->id)
#define PHY_ID(x)       ((x)->id)

#define CLK_100MHZ      100000000
#define CLK_156_25MHZ       156250000

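/* PHY core clock rate per intel_phy_mode: PCIe and SATA use 100 MHz, XPCS uses 156.25 MHz */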
static const unsigned long intel_iphy_clk_rates[] = {
    CLK_100MHZ, CLK_156_25MHZ, CLK_100MHZ,
};

enum {
    PHY_0,
    PHY_1,
    PHY_MAX_NUM
};

/*
 * Clock register bit fields that enable the ComboPhy
 * clocks according to the selected PHY mode.
 */
enum intel_phy_mode {
    PHY_PCIE_MODE = 0,
    PHY_XPCS_MODE,
    PHY_SATA_MODE,
};

/* ComboPhy mode Register values */
enum intel_combo_mode {
    PCIE0_PCIE1_MODE = 0,
    PCIE_DL_MODE,
    RXAUI_MODE,
    XPCS0_XPCS1_MODE,
    SATA0_SATA1_MODE,
};

enum aggregated_mode {
    PHY_SL_MODE,
    PHY_DL_MODE,
};

struct intel_combo_phy;

struct intel_cbphy_iphy {
    struct phy      *phy;
    struct intel_combo_phy  *parent;
    struct reset_control    *app_rst;
    u32         id;
};

struct intel_combo_phy {
    struct device       *dev;
    struct clk      *core_clk;
    unsigned long       clk_rate;
    void __iomem        *app_base;
    void __iomem        *cr_base;
    struct regmap       *syscfg;
    struct regmap       *hsiocfg;
    u32         id;
    u32         bid;
    struct reset_control    *phy_rst;
    struct reset_control    *core_rst;
    struct intel_cbphy_iphy iphy[PHY_MAX_NUM];
    enum intel_phy_mode phy_mode;
    enum aggregated_mode    aggr_mode;
    u32         init_cnt;
    struct mutex        lock;
};

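/*
 * Gate/ungate the internal PHY clock: each intel_phy_mode owns a pair of
 * bits in REG_CLK_DISABLE(), and bit (phy_mode * 2 + lane id) controls
 * the clock of one internal PHY (0 = enabled, 1 = disabled).
 */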
static int intel_cbphy_iphy_enable(struct intel_cbphy_iphy *iphy, bool set)
{
    struct intel_combo_phy *cbphy = iphy->parent;
    u32 mask = BIT(cbphy->phy_mode * 2 + iphy->id);
    u32 val;

    /* Register: 0 is enable, 1 is disable */
    val = set ? 0 : mask;

    return regmap_update_bits(cbphy->hsiocfg, REG_CLK_DISABLE(cbphy->bid),
                  mask, val);
}

static int intel_cbphy_pcie_refclk_cfg(struct intel_cbphy_iphy *iphy, bool set)
{
    struct intel_combo_phy *cbphy = iphy->parent;
    u32 mask = BIT(cbphy->id * 2 + iphy->id);
    u32 val;

    /* Register: 0 is enable, 1 is disable */
    val = set ? 0 : mask;

    return regmap_update_bits(cbphy->syscfg, PAD_DIS_CFG, mask, val);
}

static inline void combo_phy_w32_off_mask(void __iomem *base, unsigned int reg,
                      u32 mask, u32 val)
{
    u32 reg_val;

    reg_val = readl(base + reg);
    reg_val &= ~mask;
    reg_val |= val;
    writel(reg_val, base + reg);
}

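/*
 * Apply @phy_cfg to the given internal PHY; in dual-lane mode the same
 * configuration is also applied to the second lane (PHY_1).
 */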
static int intel_cbphy_iphy_cfg(struct intel_cbphy_iphy *iphy,
                int (*phy_cfg)(struct intel_cbphy_iphy *))
{
    struct intel_combo_phy *cbphy = iphy->parent;
    int ret;

    ret = phy_cfg(iphy);
    if (ret)
        return ret;

    if (cbphy->aggr_mode != PHY_DL_MODE)
        return 0;

    return phy_cfg(&cbphy->iphy[PHY_1]);
}

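/*
 * PAD_DIS_CFG holds one refclk pad disable bit per PCIe lane, while
 * PCIE_PHY_CLK_PAD gates the pad clock for the whole ComboPhy and is
 * therefore only touched before the first PHY is initialized
 * (init_cnt == 0).
 */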
static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy)
{
    struct intel_combo_phy *cbphy = iphy->parent;
    int ret;

    ret = intel_cbphy_pcie_refclk_cfg(iphy, true);
    if (ret) {
        dev_err(cbphy->dev, "Failed to enable PCIe pad refclk\n");
        return ret;
    }

    if (cbphy->init_cnt)
        return 0;

    combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
                   PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 0));

    /* Delay for stable clock PLL */
    usleep_range(50, 100);

    return 0;
}

static int intel_cbphy_pcie_dis_pad_refclk(struct intel_cbphy_iphy *iphy)
{
    struct intel_combo_phy *cbphy = iphy->parent;
    int ret;

    ret = intel_cbphy_pcie_refclk_cfg(iphy, false);
    if (ret) {
        dev_err(cbphy->dev, "Failed to disable PCIe pad refclk\n");
        return ret;
    }

    if (cbphy->init_cnt)
        return 0;

    combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
                   PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 1));

    return 0;
}

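/*
 * Program the ComboPhy work mode: PCIe maps to PCIE_DL_MODE (dual lane)
 * or PCIE0_PCIE1_MODE, XPCS maps to RXAUI_MODE or XPCS0_XPCS1_MODE, and
 * SATA supports single-lane operation only.
 */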
static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
{
    enum intel_combo_mode cb_mode;
    enum aggregated_mode aggr = cbphy->aggr_mode;
    struct device *dev = cbphy->dev;
    enum intel_phy_mode mode;
    int ret;

    mode = cbphy->phy_mode;

    switch (mode) {
    case PHY_PCIE_MODE:
        cb_mode = (aggr == PHY_DL_MODE) ? PCIE_DL_MODE : PCIE0_PCIE1_MODE;
        break;

    case PHY_XPCS_MODE:
        cb_mode = (aggr == PHY_DL_MODE) ? RXAUI_MODE : XPCS0_XPCS1_MODE;
        break;

    case PHY_SATA_MODE:
        if (aggr == PHY_DL_MODE) {
            dev_err(dev, "Mode %u does not support dual lane!\n", mode);
            return -EINVAL;
        }

        cb_mode = SATA0_SATA1_MODE;
        break;
    default:
        return -EINVAL;
    }

    ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode);
    if (ret)
        dev_err(dev, "Failed to set ComboPhy mode: %d\n", ret);

    return ret;
}

static void intel_cbphy_rst_assert(struct intel_combo_phy *cbphy)
{
    reset_control_assert(cbphy->core_rst);
    reset_control_assert(cbphy->phy_rst);
}

static void intel_cbphy_rst_deassert(struct intel_combo_phy *cbphy)
{
    reset_control_deassert(cbphy->core_rst);
    reset_control_deassert(cbphy->phy_rst);
    /* Delay to ensure reset process is done */
    usleep_range(10, 20);
}

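/*
 * Resources shared by both internal PHYs (core clock, resets and the
 * combo mode) are only configured when the first PHY is powered on;
 * cbphy->init_cnt tracks how many internal PHYs are currently in use.
 */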
static int intel_cbphy_iphy_power_on(struct intel_cbphy_iphy *iphy)
{
    struct intel_combo_phy *cbphy = iphy->parent;
    int ret;

    if (!cbphy->init_cnt) {
        ret = clk_prepare_enable(cbphy->core_clk);
        if (ret) {
            dev_err(cbphy->dev, "Clock enable failed!\n");
            return ret;
        }

        ret = clk_set_rate(cbphy->core_clk, cbphy->clk_rate);
        if (ret) {
            dev_err(cbphy->dev, "Clock freq set to %lu failed!\n",
                cbphy->clk_rate);
            goto clk_err;
        }

        intel_cbphy_rst_assert(cbphy);
        intel_cbphy_rst_deassert(cbphy);
        ret = intel_cbphy_set_mode(cbphy);
        if (ret)
            goto clk_err;
    }

    ret = intel_cbphy_iphy_enable(iphy, true);
    if (ret) {
        dev_err(cbphy->dev, "Failed enabling PHY core\n");
        goto clk_err;
    }

    ret = reset_control_deassert(iphy->app_rst);
    if (ret) {
        dev_err(cbphy->dev, "PHY(%u:%u) reset deassert failed!\n",
            COMBO_PHY_ID(iphy), PHY_ID(iphy));
        goto clk_err;
    }

    /* Delay to ensure reset process is done */
    udelay(1);

    return 0;

clk_err:
    clk_disable_unprepare(cbphy->core_clk);

    return ret;
}

static int intel_cbphy_iphy_power_off(struct intel_cbphy_iphy *iphy)
{
    struct intel_combo_phy *cbphy = iphy->parent;
    int ret;

    ret = reset_control_assert(iphy->app_rst);
    if (ret) {
        dev_err(cbphy->dev, "PHY(%u:%u) reset assert failed!\n",
            COMBO_PHY_ID(iphy), PHY_ID(iphy));
        return ret;
    }

    ret = intel_cbphy_iphy_enable(iphy, false);
    if (ret) {
        dev_err(cbphy->dev, "Failed disabling PHY core\n");
        return ret;
    }

    if (cbphy->init_cnt)
        return 0;

    clk_disable_unprepare(cbphy->core_clk);
    intel_cbphy_rst_assert(cbphy);

    return 0;
}

static int intel_cbphy_init(struct phy *phy)
{
    struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
    struct intel_combo_phy *cbphy = iphy->parent;
    int ret;

    mutex_lock(&cbphy->lock);
    ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_on);
    if (ret)
        goto err;

    if (cbphy->phy_mode == PHY_PCIE_MODE) {
        ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_en_pad_refclk);
        if (ret)
            goto err;
    }

    cbphy->init_cnt++;

err:
    mutex_unlock(&cbphy->lock);

    return ret;
}

static int intel_cbphy_exit(struct phy *phy)
{
    struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
    struct intel_combo_phy *cbphy = iphy->parent;
    int ret;

    mutex_lock(&cbphy->lock);
    cbphy->init_cnt--;
    if (cbphy->phy_mode == PHY_PCIE_MODE) {
        ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_dis_pad_refclk);
        if (ret)
            goto err;
    }

    ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_off);

err:
    mutex_unlock(&cbphy->lock);

    return ret;
}

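/*
 * RX adaptation handshake (XPCS mode only): request adaptation through
 * ADAPT_REQ_MSK, poll PCS_XF_RX_ADAPT_ACK for completion and then clear
 * the request again.
 */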
static int intel_cbphy_calibrate(struct phy *phy)
{
    struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
    struct intel_combo_phy *cbphy = iphy->parent;
    void __iomem *cr_base = cbphy->cr_base;
    int val, ret, id;

    if (cbphy->phy_mode != PHY_XPCS_MODE)
        return 0;

    id = PHY_ID(iphy);

    /* Trigger auto RX adaptation */
    combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
                   ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 3));
    /* Wait for RX adaptation to finish */
    ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id),
                 val, val & RX_ADAPT_ACK_BIT, 10, 5000);
    if (ret)
        dev_err(cbphy->dev, "RX adaptation failed!\n");
    else
        dev_dbg(cbphy->dev, "RX adaptation succeeded!\n");

    /* Stop RX adaptation */
    combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
                   ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 0));

    return ret;
}

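/*
 * The "intel,syscfg" and "intel,hsio" phandle arguments carry the
 * ComboPhy instance id and the HSIO bridge id, which select the
 * per-instance bit and register offsets within the shared system-config
 * and HSIO register blocks.
 */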
static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
{
    struct device *dev = cbphy->dev;
    struct platform_device *pdev = to_platform_device(dev);
    struct fwnode_handle *fwnode = dev_fwnode(dev);
    struct fwnode_reference_args ref;
    int ret;
    u32 val;

    cbphy->core_clk = devm_clk_get(dev, NULL);
    if (IS_ERR(cbphy->core_clk)) {
        ret = PTR_ERR(cbphy->core_clk);
        if (ret != -EPROBE_DEFER)
            dev_err(dev, "Get clk failed:%d!\n", ret);
        return ret;
    }

    cbphy->core_rst = devm_reset_control_get_optional(dev, "core");
    if (IS_ERR(cbphy->core_rst)) {
        ret = PTR_ERR(cbphy->core_rst);
        if (ret != -EPROBE_DEFER)
            dev_err(dev, "Get core reset control err: %d!\n", ret);
        return ret;
    }

    cbphy->phy_rst = devm_reset_control_get_optional(dev, "phy");
    if (IS_ERR(cbphy->phy_rst)) {
        ret = PTR_ERR(cbphy->phy_rst);
        if (ret != -EPROBE_DEFER)
            dev_err(dev, "Get PHY reset control err: %d!\n", ret);
        return ret;
    }

    cbphy->iphy[0].app_rst = devm_reset_control_get_optional(dev, "iphy0");
    if (IS_ERR(cbphy->iphy[0].app_rst)) {
        ret = PTR_ERR(cbphy->iphy[0].app_rst);
        if (ret != -EPROBE_DEFER)
            dev_err(dev, "Get phy0 reset control err: %d!\n", ret);
        return ret;
    }

    cbphy->iphy[1].app_rst = devm_reset_control_get_optional(dev, "iphy1");
    if (IS_ERR(cbphy->iphy[1].app_rst)) {
        ret = PTR_ERR(cbphy->iphy[1].app_rst);
        if (ret != -EPROBE_DEFER)
            dev_err(dev, "Get phy1 reset control err: %d!\n", ret);
        return ret;
    }

    cbphy->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
    if (IS_ERR(cbphy->app_base))
        return PTR_ERR(cbphy->app_base);

    cbphy->cr_base = devm_platform_ioremap_resource_byname(pdev, "core");
    if (IS_ERR(cbphy->cr_base))
        return PTR_ERR(cbphy->cr_base);

    /*
     * syscfg and hsiocfg hold regmap handles for the register blocks
     * that contain the ComboPhy subsystem specific registers; the
     * regmap framework is used to access them.
     */
    ret = fwnode_property_get_reference_args(fwnode, "intel,syscfg", NULL,
                         1, 0, &ref);
    if (ret < 0)
        return ret;

    cbphy->id = ref.args[0];
    cbphy->syscfg = device_node_to_regmap(to_of_node(ref.fwnode));
    fwnode_handle_put(ref.fwnode);

    ret = fwnode_property_get_reference_args(fwnode, "intel,hsio", NULL, 1,
                         0, &ref);
    if (ret < 0)
        return ret;

    cbphy->bid = ref.args[0];
    cbphy->hsiocfg = device_node_to_regmap(to_of_node(ref.fwnode));
    fwnode_handle_put(ref.fwnode);

    ret = fwnode_property_read_u32_array(fwnode, "intel,phy-mode", &val, 1);
    if (ret)
        return ret;

    switch (val) {
    case PHY_TYPE_PCIE:
        cbphy->phy_mode = PHY_PCIE_MODE;
        break;

    case PHY_TYPE_SATA:
        cbphy->phy_mode = PHY_SATA_MODE;
        break;

    case PHY_TYPE_XPCS:
        cbphy->phy_mode = PHY_XPCS_MODE;
        break;

    default:
        dev_err(dev, "Invalid PHY mode: %u\n", val);
        return -EINVAL;
    }

    cbphy->clk_rate = intel_iphy_clk_rates[cbphy->phy_mode];

    if (fwnode_property_present(fwnode, "intel,aggregation"))
        cbphy->aggr_mode = PHY_DL_MODE;
    else
        cbphy->aggr_mode = PHY_SL_MODE;

    return 0;
}

static const struct phy_ops intel_cbphy_ops = {
    .init       = intel_cbphy_init,
    .exit       = intel_cbphy_exit,
    .calibrate  = intel_cbphy_calibrate,
    .owner      = THIS_MODULE,
};

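/*
 * Translate the #phy-cells argument into an internal PHY instance; only
 * PHY_0 can be requested when the ComboPhy runs in dual-lane mode.
 */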
static struct phy *intel_cbphy_xlate(struct device *dev,
                     struct of_phandle_args *args)
{
    struct intel_combo_phy *cbphy = dev_get_drvdata(dev);
    u32 iphy_id;

    if (args->args_count < 1) {
        dev_err(dev, "Invalid number of arguments\n");
        return ERR_PTR(-EINVAL);
    }

    iphy_id = args->args[0];
    if (iphy_id >= PHY_MAX_NUM) {
        dev_err(dev, "Invalid phy instance %d\n", iphy_id);
        return ERR_PTR(-EINVAL);
    }

    if (cbphy->aggr_mode == PHY_DL_MODE && iphy_id == PHY_1) {
        dev_err(dev, "Invalid PHY %d: ComboPhy is in dual-lane mode\n", iphy_id);
        return ERR_PTR(-EINVAL);
    }

    return cbphy->iphy[iphy_id].phy;
}

static int intel_cbphy_create(struct intel_combo_phy *cbphy)
{
    struct phy_provider *phy_provider;
    struct device *dev = cbphy->dev;
    struct intel_cbphy_iphy *iphy;
    int i;

    for (i = 0; i < PHY_MAX_NUM; i++) {
        iphy = &cbphy->iphy[i];
        iphy->parent = cbphy;
        iphy->id = i;

        /* In dual lane mode skip phy creation for the second phy */
        if (cbphy->aggr_mode == PHY_DL_MODE && iphy->id == PHY_1)
            continue;

        iphy->phy = devm_phy_create(dev, NULL, &intel_cbphy_ops);
        if (IS_ERR(iphy->phy)) {
            dev_err(dev, "PHY[%u:%u]: create PHY instance failed!\n",
                COMBO_PHY_ID(iphy), PHY_ID(iphy));

            return PTR_ERR(iphy->phy);
        }

        phy_set_drvdata(iphy->phy, iphy);
    }

    dev_set_drvdata(dev, cbphy);
    phy_provider = devm_of_phy_provider_register(dev, intel_cbphy_xlate);
    if (IS_ERR(phy_provider))
        dev_err(dev, "Register PHY provider failed!\n");

    return PTR_ERR_OR_ZERO(phy_provider);
}

static int intel_cbphy_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct intel_combo_phy *cbphy;
    int ret;

    cbphy = devm_kzalloc(dev, sizeof(*cbphy), GFP_KERNEL);
    if (!cbphy)
        return -ENOMEM;

    cbphy->dev = dev;
    cbphy->init_cnt = 0;
    mutex_init(&cbphy->lock);
    ret = intel_cbphy_fwnode_parse(cbphy);
    if (ret)
        return ret;

    platform_set_drvdata(pdev, cbphy);

    return intel_cbphy_create(cbphy);
}

static int intel_cbphy_remove(struct platform_device *pdev)
{
    struct intel_combo_phy *cbphy = platform_get_drvdata(pdev);

    intel_cbphy_rst_assert(cbphy);
    clk_disable_unprepare(cbphy->core_clk);
    return 0;
}

static const struct of_device_id of_intel_cbphy_match[] = {
    { .compatible = "intel,combo-phy" },
    { .compatible = "intel,combophy-lgm" },
    {}
};

static struct platform_driver intel_cbphy_driver = {
    .probe = intel_cbphy_probe,
    .remove = intel_cbphy_remove,
    .driver = {
        .name = "intel-combo-phy",
        .of_match_table = of_intel_cbphy_match,
    }
};

module_platform_driver(intel_cbphy_driver);

MODULE_DESCRIPTION("Intel Combo-phy driver");
MODULE_LICENSE("GPL v2");