// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller PCI glue driver
 *
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *  Santosh Yaraganavi <santosh.sy@samsung.com>
 *  Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <ufs/ufshcd.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
#include <linux/uuid.h>
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>

struct ufs_host {
    void (*late_init)(struct ufs_hba *hba);
};

enum intel_ufs_dsm_func_id {
    INTEL_DSM_FNS       =  0,
    INTEL_DSM_RESET     =  1,
};

struct intel_host {
    struct ufs_host ufs_host;
    u32     dsm_fns;
    u32     active_ltr;
    u32     idle_ltr;
    struct dentry   *debugfs_root;
    struct gpio_desc *reset_gpio;
};

static const guid_t intel_dsm_guid =
    GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
          0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);

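/*
 * Check whether DSM function @fn is advertised in the function bitmap
 * that _DSM function 0 (INTEL_DSM_FNS) returned at init time.
 */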
static bool __intel_dsm_supported(struct intel_host *host,
                  enum intel_ufs_dsm_func_id fn)
{
    return fn < 32 && fn >= 0 && (host->dsm_fns & (1u << fn));
}

#define INTEL_DSM_SUPPORTED(host, name) \
    __intel_dsm_supported(host, INTEL_DSM_##name)

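/*
 * Evaluate the Intel UFS _DSM method for function @fn and copy up to
 * four bytes of the returned buffer into @result.
 */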
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
               unsigned int fn, u32 *result)
{
    union acpi_object *obj;
    int err = 0;
    size_t len;

    obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
    if (!obj)
        return -EOPNOTSUPP;

    if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
        err = -EINVAL;
        goto out;
    }

    len = min_t(size_t, obj->buffer.length, 4);

    *result = 0;
    memcpy(result, obj->buffer.pointer, len);
out:
    ACPI_FREE(obj);

    return err;
}

static int intel_dsm(struct intel_host *intel_host, struct device *dev,
             unsigned int fn, u32 *result)
{
    if (!__intel_dsm_supported(intel_host, fn))
        return -EOPNOTSUPP;

    return __intel_dsm(intel_host, dev, fn, result);
}

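/* Read the bitmap of supported DSM functions via _DSM function 0 */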
static void intel_dsm_init(struct intel_host *intel_host, struct device *dev)
{
    int err;

    err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
    dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err);
}

static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
                       enum ufs_notify_change_status status)
{
    /* Cannot enable ICE until after HC enable */
    if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
        u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);

        hce |= CRYPTO_GENERAL_ENABLE;
        ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
    }

    return 0;
}

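/* Disable host TX LCC if the local PA layer reports it enabled */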
static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
    u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
    u32 lcc_enable = 0;

    ufshcd_dme_get(hba, attr, &lcc_enable);
    if (lcc_enable)
        ufshcd_disable_host_tx_lcc(hba);

    return 0;
}

static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
                     enum ufs_notify_change_status status)
{
    int err = 0;

    switch (status) {
    case PRE_CHANGE:
        err = ufs_intel_disable_lcc(hba);
        break;
    case POST_CHANGE:
        break;
    default:
        break;
    }

    return err;
}

static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
{
    struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
    int ret;

    pwr_info.lane_rx = lanes;
    pwr_info.lane_tx = lanes;
    ret = ufshcd_config_pwr_mode(hba, &pwr_info);
    if (ret)
        dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
            __func__, lanes, ret);
    return ret;
}

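/*
 * Before a high-speed power mode change, make sure both lanes are in use;
 * after the change, wait briefly and read back the peer's PA_GRANULARITY.
 */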
static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
                enum ufs_notify_change_status status,
                struct ufs_pa_layer_attr *dev_max_params,
                struct ufs_pa_layer_attr *dev_req_params)
{
    int err = 0;

    switch (status) {
    case PRE_CHANGE:
        if (ufshcd_is_hs_mode(dev_max_params) &&
            (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
            ufs_intel_set_lanes(hba, 2);
        memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
        break;
    case POST_CHANGE:
        if (ufshcd_is_hs_mode(dev_req_params)) {
            u32 peer_granularity;

            usleep_range(1000, 1250);
            err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
                          &peer_granularity);
        }
        break;
    default:
        break;
    }

    return err;
}

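/*
 * If the local and peer granularity match, raise the peer's PA_TACTIVATE
 * to two units above the local value.
 */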
static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
{
    u32 granularity, peer_granularity;
    u32 pa_tactivate, peer_pa_tactivate;
    int ret;

    ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
    if (ret)
        goto out;

    ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
    if (ret)
        goto out;

    ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
    if (ret)
        goto out;

    ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
    if (ret)
        goto out;

    if (granularity == peer_granularity) {
        u32 new_peer_pa_tactivate = pa_tactivate + 2;

        ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
    }
out:
    return ret;
}

#define INTEL_ACTIVELTR     0x804
#define INTEL_IDLELTR       0x808

#define INTEL_LTR_REQ       BIT(15)
#define INTEL_LTR_SCALE_MASK    GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US (2 << 10)
#define INTEL_LTR_SCALE_32US    (3 << 10)
#define INTEL_LTR_VALUE_MASK    GENMASK(9, 0)

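/* Cache the current LTR register values in the intel_host structure */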
static void intel_cache_ltr(struct ufs_hba *hba)
{
    struct intel_host *host = ufshcd_get_variant(hba);

    host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
    host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
}

static void intel_ltr_set(struct device *dev, s32 val)
{
    struct ufs_hba *hba = dev_get_drvdata(dev);
    struct intel_host *host = ufshcd_get_variant(hba);
    u32 ltr;

    pm_runtime_get_sync(dev);

    /*
     * Program the latency tolerance (LTR) according to what the PM QoS
     * layer has asked for, or disable it if we were passed a negative
     * value or PM_QOS_LATENCY_ANY.
     */
    ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);

    if (val == PM_QOS_LATENCY_ANY || val < 0) {
        ltr &= ~INTEL_LTR_REQ;
    } else {
        ltr |= INTEL_LTR_REQ;
        ltr &= ~INTEL_LTR_SCALE_MASK;
        ltr &= ~INTEL_LTR_VALUE_MASK;

        if (val > INTEL_LTR_VALUE_MASK) {
            val >>= 5;
            if (val > INTEL_LTR_VALUE_MASK)
                val = INTEL_LTR_VALUE_MASK;
            ltr |= INTEL_LTR_SCALE_32US | val;
        } else {
            ltr |= INTEL_LTR_SCALE_1US | val;
        }
    }

    if (ltr == host->active_ltr)
        goto out;

    writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
    writel(ltr, hba->mmio_base + INTEL_IDLELTR);

    /* Cache the values into the intel_host structure */
    intel_cache_ltr(hba);
out:
    pm_runtime_put(dev);
}

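/* Expose latency tolerance control to user space through PM QoS */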
static void intel_ltr_expose(struct device *dev)
{
    dev->power.set_latency_tolerance = intel_ltr_set;
    dev_pm_qos_expose_latency_tolerance(dev);
}

static void intel_ltr_hide(struct device *dev)
{
    dev_pm_qos_hide_latency_tolerance(dev);
    dev->power.set_latency_tolerance = NULL;
}

static void intel_add_debugfs(struct ufs_hba *hba)
{
    struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
    struct intel_host *host = ufshcd_get_variant(hba);

    intel_cache_ltr(hba);

    host->debugfs_root = dir;
    debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
    debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
}

static void intel_remove_debugfs(struct ufs_hba *hba)
{
    struct intel_host *host = ufshcd_get_variant(hba);

    debugfs_remove_recursive(host->debugfs_root);
}

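/*
 * Reset the device, preferring the ACPI _DSM reset function when available
 * and falling back to toggling the optional reset GPIO.
 */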
static int ufs_intel_device_reset(struct ufs_hba *hba)
{
    struct intel_host *host = ufshcd_get_variant(hba);

    if (INTEL_DSM_SUPPORTED(host, RESET)) {
        u32 result = 0;
        int err;

        err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
        if (!err && !result)
            err = -EIO;
        if (err)
            dev_err(hba->dev, "%s: DSM error %d result %u\n",
                __func__, err, result);
        return err;
    }

    if (!host->reset_gpio)
        return -EOPNOTSUPP;

    gpiod_set_value_cansleep(host->reset_gpio, 1);
    usleep_range(10, 15);

    gpiod_set_value_cansleep(host->reset_gpio, 0);
    usleep_range(10, 15);

    return 0;
}

static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
{
    /* GPIO in _DSD has active low setting */
    return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
}

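/*
 * Common init: allocate the variant host data, probe the DSM interface,
 * set up device reset (DSM or GPIO), and expose the LTR and debugfs entries.
 */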
static int ufs_intel_common_init(struct ufs_hba *hba)
{
    struct intel_host *host;

    hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

    host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
    if (!host)
        return -ENOMEM;
    ufshcd_set_variant(hba, host);
    intel_dsm_init(host, hba->dev);
    if (INTEL_DSM_SUPPORTED(host, RESET)) {
        if (hba->vops->device_reset)
            hba->caps |= UFSHCD_CAP_DEEPSLEEP;
    } else {
        if (hba->vops->device_reset)
            host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
        if (IS_ERR(host->reset_gpio)) {
            dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
                __func__, PTR_ERR(host->reset_gpio));
            host->reset_gpio = NULL;
        }
        if (host->reset_gpio) {
            gpiod_set_value_cansleep(host->reset_gpio, 0);
            hba->caps |= UFSHCD_CAP_DEEPSLEEP;
        }
    }
    intel_ltr_expose(hba->dev);
    intel_add_debugfs(hba);
    return 0;
}

static void ufs_intel_common_exit(struct ufs_hba *hba)
{
    intel_remove_debugfs(hba);
    intel_ltr_hide(hba->dev);
}

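/*
 * If the link was left in hibern8 over suspend, exit hibern8 on resume; on
 * failure, mark the link off to force a full reset and restore.
 */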
static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
    if (ufshcd_is_link_hibern8(hba)) {
        int ret = ufshcd_uic_hibern8_exit(hba);

        if (!ret) {
            ufshcd_set_link_active(hba);
        } else {
            dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
                __func__, ret);
            /*
             * Force reset and restore. Any other actions can lead
             * to an unrecoverable state.
             */
            ufshcd_set_link_off(hba);
        }
    }

    return 0;
}

static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
    hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
    return ufs_intel_common_init(hba);
}

static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
{
    /* LKF always needs a full reset, so set PM accordingly */
    if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
        hba->spm_lvl = UFS_PM_LVL_6;
        hba->rpm_lvl = UFS_PM_LVL_6;
    } else {
        hba->spm_lvl = UFS_PM_LVL_5;
        hba->rpm_lvl = UFS_PM_LVL_5;
    }
}

static int ufs_intel_lkf_init(struct ufs_hba *hba)
{
    struct ufs_host *ufs_host;
    int err;

    hba->nop_out_timeout = 200;
    hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
    hba->caps |= UFSHCD_CAP_CRYPTO;
    err = ufs_intel_common_init(hba);
    ufs_host = ufshcd_get_variant(hba);
    ufs_host->late_init = ufs_intel_lkf_late_init;
    return err;
}

static int ufs_intel_adl_init(struct ufs_hba *hba)
{
    hba->nop_out_timeout = 200;
    hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
    hba->caps |= UFSHCD_CAP_WB_EN;
    return ufs_intel_common_init(hba);
}

static int ufs_intel_mtl_init(struct ufs_hba *hba)
{
    hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
    return ufs_intel_common_init(hba);
}

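/* Variant operations, one set per Intel platform generation */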
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
    .name           = "intel-pci",
    .init           = ufs_intel_common_init,
    .exit           = ufs_intel_common_exit,
    .link_startup_notify    = ufs_intel_link_startup_notify,
    .resume         = ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
    .name           = "intel-pci",
    .init           = ufs_intel_ehl_init,
    .exit           = ufs_intel_common_exit,
    .link_startup_notify    = ufs_intel_link_startup_notify,
    .resume         = ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
    .name           = "intel-pci",
    .init           = ufs_intel_lkf_init,
    .exit           = ufs_intel_common_exit,
    .hce_enable_notify  = ufs_intel_hce_enable_notify,
    .link_startup_notify    = ufs_intel_link_startup_notify,
    .pwr_change_notify  = ufs_intel_lkf_pwr_change_notify,
    .apply_dev_quirks   = ufs_intel_lkf_apply_dev_quirks,
    .resume         = ufs_intel_resume,
    .device_reset       = ufs_intel_device_reset,
};

static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
    .name           = "intel-pci",
    .init           = ufs_intel_adl_init,
    .exit           = ufs_intel_common_exit,
    .link_startup_notify    = ufs_intel_link_startup_notify,
    .resume         = ufs_intel_resume,
    .device_reset       = ufs_intel_device_reset,
};

static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
    .name           = "intel-pci",
    .init           = ufs_intel_mtl_init,
    .exit           = ufs_intel_common_exit,
    .hce_enable_notify  = ufs_intel_hce_enable_notify,
    .link_startup_notify    = ufs_intel_link_startup_notify,
    .resume         = ufs_intel_resume,
    .device_reset       = ufs_intel_device_reset,
};

#ifdef CONFIG_PM_SLEEP
static int ufshcd_pci_restore(struct device *dev)
{
    struct ufs_hba *hba = dev_get_drvdata(dev);

    /* Force a full reset and restore */
    ufshcd_set_link_off(hba);

    return ufshcd_system_resume(dev);
}
#endif

/**
 * ufshcd_pci_shutdown - main function to put the controller in reset state
 * @pdev: pointer to PCI device handle
 */
static void ufshcd_pci_shutdown(struct pci_dev *pdev)
{
    ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
}

/**
 * ufshcd_pci_remove - de-allocate the PCI/SCSI host and host memory space
 *      data structures
 * @pdev: pointer to PCI handle
 */
static void ufshcd_pci_remove(struct pci_dev *pdev)
{
    struct ufs_hba *hba = pci_get_drvdata(pdev);

    pm_runtime_forbid(&pdev->dev);
    pm_runtime_get_noresume(&pdev->dev);
    ufshcd_remove(hba);
    ufshcd_dealloc_host(hba);
}

/**
 * ufshcd_pci_probe - probe routine of the driver
 * @pdev: pointer to PCI device handle
 * @id: PCI device id
 *
 * Returns 0 on success, non-zero value on failure
 */
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    struct ufs_host *ufs_host;
    struct ufs_hba *hba;
    void __iomem *mmio_base;
    int err;

    err = pcim_enable_device(pdev);
    if (err) {
        dev_err(&pdev->dev, "pcim_enable_device failed\n");
        return err;
    }

    pci_set_master(pdev);

    err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
    if (err < 0) {
        dev_err(&pdev->dev, "request and iomap failed\n");
        return err;
    }

    mmio_base = pcim_iomap_table(pdev)[0];

    err = ufshcd_alloc_host(&pdev->dev, &hba);
    if (err) {
        dev_err(&pdev->dev, "Allocation failed\n");
        return err;
    }

    hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;

    err = ufshcd_init(hba, mmio_base, pdev->irq);
    if (err) {
        dev_err(&pdev->dev, "Initialization failed\n");
        ufshcd_dealloc_host(hba);
        return err;
    }

    ufs_host = ufshcd_get_variant(hba);
    if (ufs_host && ufs_host->late_init)
        ufs_host->late_init(hba);

    pm_runtime_put_noidle(&pdev->dev);
    pm_runtime_allow(&pdev->dev);

    return 0;
}

static const struct dev_pm_ops ufshcd_pci_pm_ops = {
    SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
    .suspend    = ufshcd_system_suspend,
    .resume     = ufshcd_system_resume,
    .freeze     = ufshcd_system_suspend,
    .thaw       = ufshcd_system_resume,
    .poweroff   = ufshcd_system_suspend,
    .restore    = ufshcd_pci_restore,
    .prepare    = ufshcd_suspend_prepare,
    .complete   = ufshcd_resume_complete,
#endif
};

static const struct pci_device_id ufshcd_pci_tbl[] = {
    { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
    { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
    { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
    { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
    { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
    { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
    { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
    { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
    { } /* terminate list */
};

MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);

static struct pci_driver ufshcd_pci_driver = {
    .name = UFSHCD,
    .id_table = ufshcd_pci_tbl,
    .probe = ufshcd_pci_probe,
    .remove = ufshcd_pci_remove,
    .shutdown = ufshcd_pci_shutdown,
    .driver = {
        .pm = &ufshcd_pci_pm_ops
    },
};

module_pci_driver(ufshcd_pci_driver);

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller PCI glue driver");
MODULE_LICENSE("GPL");