// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 */
#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>

#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define T7XX_PCI_IREG_BASE		0
#define T7XX_PCI_EREG_BASE		2

#define PM_SLEEP_DIS_TIMEOUT_MS		20
#define PM_ACK_TIMEOUT_MS		1500
#define PM_AUTOSUSPEND_MS		20000
#define PM_RESOURCE_POLL_TIMEOUT_US	10000
#define PM_RESOURCE_POLL_STEP_US	100

enum t7xx_pm_state {
	MTK_PM_EXCEPTION,
	MTK_PM_INIT,		/* Device initialized, but handshake not completed */
	MTK_PM_SUSPENDED,
	MTK_PM_RESUMED,
};

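/* Set the MAC deep-sleep capability: clearing T7XX_PCIE_MISC_MAC_SLEEP_DIS
 * allows the device to enter deep sleep, setting it keeps the device awake.
 */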
static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable)
{
	void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL;
	u32 value;

	value = ioread32(ctrl_reg);

	if (enable)
		value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS;
	else
		value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS;

	iowrite32(value, ctrl_reg);
}

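/* Poll T7XX_PCIE_RESOURCE_STATUS until all PCIe resources are granted,
 * or give up with -ETIMEDOUT after PM_RESOURCE_POLL_TIMEOUT_US.
 */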
static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev)
{
	int ret, val;

	ret = read_poll_timeout(ioread32, val,
				(val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK,
				PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true,
				IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
	if (ret == -ETIMEDOUT)
		dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n");

	return ret;
}

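/* Initialize PM bookkeeping (entity list, locks, completions), enable wake-up,
 * keep ASPM low-power states disabled until the modem is ready, and configure
 * runtime-PM autosuspend.
 */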
static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct pci_dev *pdev = t7xx_dev->pdev;

	INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
	mutex_init(&t7xx_dev->md_pm_entity_mtx);
	spin_lock_init(&t7xx_dev->md_pm_lock);
	init_completion(&t7xx_dev->sleep_lock_acquire);
	init_completion(&t7xx_dev->pm_sr_ack);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

	device_init_wakeup(&pdev->dev, true);
	dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags |
				DPM_FLAG_NO_DIRECT_COMPLETE);

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	return t7xx_wait_pm_config(t7xx_dev);
}

void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
{
	/* Enable the PCIe resource lock only after MD deep sleep is done */
	t7xx_mhccif_mask_clr(t7xx_dev,
			     D2H_INT_DS_LOCK_ACK |
			     D2H_INT_SUSPEND_ACK |
			     D2H_INT_RESUME_ACK |
			     D2H_INT_SUSPEND_ACK_AP |
			     D2H_INT_RESUME_ACK_AP);
	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

	pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
}

static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
{
	/* The device is kept in FSM re-init flow
	 * so just roll back PM setting to the init setting.
	 */
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

	pm_runtime_get_noresume(&t7xx_dev->pdev->dev);

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	return t7xx_wait_pm_config(t7xx_dev);
}

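/* Called when a modem exception is detected: lock out low-power states, wait
 * for the PCIe resources to be granted, then mark the PM state as exception.
 */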
void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev)
{
	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	t7xx_wait_pm_config(t7xx_dev);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION);
}

int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
	struct md_pm_entity *entity;

	mutex_lock(&t7xx_dev->md_pm_entity_mtx);
	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->id == pm_entity->id) {
			mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
			return -EEXIST;
		}
	}

	list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities);
	mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
	return 0;
}

int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
	struct md_pm_entity *entity, *tmp_entity;

	mutex_lock(&t7xx_dev->md_pm_entity_mtx);
	list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->id == pm_entity->id) {
			list_del(&pm_entity->entity);
			mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
			return 0;
		}
	}

	mutex_unlock(&t7xx_dev->md_pm_entity_mtx);

	return -ENXIO;
}

int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	int ret;

	ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire,
					  msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS));
	if (!ret)
		dev_err_ratelimited(dev, "Resource wait complete timed out\n");

	return ret;
}

/**
 * t7xx_pci_disable_sleep() - Disable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * Lock the deep sleep capability, note that the device can still go into deep sleep
 * state while device is in D0 state, from the host's point-of-view.
 *
 * If device is in deep sleep state, wake up the device and disable deep sleep capability.
 */
void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
	t7xx_dev->sleep_disable_count++;
	if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
		goto unlock_and_complete;

	if (t7xx_dev->sleep_disable_count == 1) {
		u32 status;

		reinit_completion(&t7xx_dev->sleep_lock_acquire);
		t7xx_dev_set_sleep_capability(t7xx_dev, false);

		status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
		if (status & T7XX_PCIE_RESOURCE_STS_MSK)
			goto unlock_and_complete;

		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK);
	}
	spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
	return;

unlock_and_complete:
	spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
	complete_all(&t7xx_dev->sleep_lock_acquire);
}

/**
 * t7xx_pci_enable_sleep() - Enable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * After enabling deep sleep, device can enter into deep sleep state.
 */
void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
	t7xx_dev->sleep_disable_count--;
	if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
		goto unlock;

	if (t7xx_dev->sleep_disable_count == 0)
		t7xx_dev_set_sleep_capability(t7xx_dev, true);

unlock:
	spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
}

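/* Send a suspend/resume request to the device via an MHCCIF software
 * interrupt and wait up to PM_ACK_TIMEOUT_MS for the corresponding ACK to
 * complete pm_sr_ack.
 */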
static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request)
{
	unsigned long wait_ret;

	reinit_completion(&t7xx_dev->pm_sr_ack);
	t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request);
	wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack,
					       msecs_to_jiffies(PM_ACK_TIMEOUT_MS));
	if (!wait_ret)
		return -ETIMEDOUT;

	return 0;
}

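/* Common suspend path for system sleep, runtime suspend and shutdown:
 * block low-power states, suspend each registered PM entity, then request
 * the modem (MD) and the SAP to save their state. On failure, entities that
 * were already suspended are resumed and the device returns to MTK_PM_RESUMED.
 */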
static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
{
	enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID;
	struct t7xx_pci_dev *t7xx_dev;
	struct md_pm_entity *entity;
	int ret;

	t7xx_dev = pci_get_drvdata(pdev);
	if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
		dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
		return -EFAULT;
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	ret = t7xx_wait_pm_config(t7xx_dev);
	if (ret) {
		iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
		return ret;
	}

	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
	t7xx_dev->rgu_pci_irq_en = false;

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (!entity->suspend)
			continue;

		ret = entity->suspend(t7xx_dev, entity->entity_param);
		if (ret) {
			entity_id = entity->id;
			dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id);
			goto abort_suspend;
		}
	}

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ);
	if (ret) {
		dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret);
		goto abort_suspend;
	}

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP);
	if (ret) {
		t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
		dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret);
		goto abort_suspend;
	}

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->suspend_late)
			entity->suspend_late(t7xx_dev, entity->entity_param);
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	return 0;

abort_suspend:
	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity_id == entity->id)
			break;

		if (entity->resume)
			entity->resume(t7xx_dev, entity->entity_param);
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
	t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
	return ret;
}

static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev)
{
	t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);

	/* Disable interrupt first and let the IPs enable them */
	iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0);

	/* Device disables PCIe interrupts during resume and
	 * following function will re-enable PCIe interrupts.
	 */
	t7xx_pcie_mac_interrupts_en(t7xx_dev);
	t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
}

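/* Restore PCIe MAC state (address translation and interrupt routing) after a
 * resume; @is_d3 additionally re-initializes MHCCIF and rolls the PM state
 * back to init for a device that lost context in D3/L3.
 */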
static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3)
{
	int ret;

	ret = pcim_enable_device(t7xx_dev->pdev);
	if (ret)
		return ret;

	t7xx_pcie_mac_atr_init(t7xx_dev);
	t7xx_pcie_interrupt_reinit(t7xx_dev);

	if (is_d3) {
		t7xx_mhccif_init(t7xx_dev);
		return t7xx_pci_pm_reinit(t7xx_dev);
	}

	return 0;
}

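/* Queue a start/stop command to the modem state machine. FSM_CMD_START also
 * re-arms the SAP RGU interrupt before the modem is started.
 */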
static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event)
{
	struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl;
	struct device *dev = &t7xx_dev->pdev->dev;
	int ret = -EINVAL;

	switch (event) {
	case FSM_CMD_STOP:
		ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
		break;

	case FSM_CMD_START:
		t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
		t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
		t7xx_dev->rgu_pci_irq_en = true;
		t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
		ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0);
		break;

	default:
		break;
	}

	if (ret)
		dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret);

	return ret;
}

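/* Common resume path. With @state_check set, T7XX_PCIE_PM_RESUME_STATE is
 * read to distinguish L1/L2/L3 exits and exception states, selecting the
 * appropriate recovery: PCIe re-init, a full FSM stop/start, or plain resume.
 */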
static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
{
	struct t7xx_pci_dev *t7xx_dev;
	struct md_pm_entity *entity;
	u32 prev_state;
	int ret = 0;

	t7xx_dev = pci_get_drvdata(pdev);
	if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
		iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
		return 0;
	}

	t7xx_pcie_mac_interrupts_en(t7xx_dev);
	prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE);

	if (state_check) {
		/* For D3/L3 resume, the device could boot so quickly that the
		 * initial value of the dummy register might be overwritten.
		 * Identify new boots if the ATR source address register is not initialized.
		 */
		u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) +
					   ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR);
		if (prev_state == PM_RESUME_REG_STATE_L3 ||
		    (prev_state == PM_RESUME_REG_STATE_INIT &&
		     atr_reg_val == ATR_SRC_ADDR_INVALID)) {
			ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
			if (ret)
				return ret;

			ret = t7xx_pcie_reinit(t7xx_dev, true);
			if (ret)
				return ret;

			t7xx_clear_rgu_irq(t7xx_dev);
			return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
		}

		if (prev_state == PM_RESUME_REG_STATE_EXP ||
		    prev_state == PM_RESUME_REG_STATE_L2_EXP) {
			if (prev_state == PM_RESUME_REG_STATE_L2_EXP) {
				ret = t7xx_pcie_reinit(t7xx_dev, false);
				if (ret)
					return ret;
			}

			atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
			t7xx_dev->rgu_pci_irq_en = true;
			t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);

			t7xx_mhccif_mask_clr(t7xx_dev,
					     D2H_INT_EXCEPTION_INIT |
					     D2H_INT_EXCEPTION_INIT_DONE |
					     D2H_INT_EXCEPTION_CLEARQ_DONE |
					     D2H_INT_EXCEPTION_ALLQ_RESET |
					     D2H_INT_PORT_ENUM);

			return ret;
		}

		if (prev_state == PM_RESUME_REG_STATE_L2) {
			ret = t7xx_pcie_reinit(t7xx_dev, false);
			if (ret)
				return ret;

		} else if (prev_state != PM_RESUME_REG_STATE_L1 &&
			   prev_state != PM_RESUME_REG_STATE_INIT) {
			ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
			if (ret)
				return ret;

			t7xx_clear_rgu_irq(t7xx_dev);
			atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
			return 0;
		}
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	t7xx_wait_pm_config(t7xx_dev);

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->resume_early)
			entity->resume_early(t7xx_dev, entity->entity_param);
	}

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
	if (ret)
		dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret);

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP);
	if (ret)
		dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret);

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->resume) {
			ret = entity->resume(t7xx_dev, entity->entity_param);
			if (ret)
				dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n",
					entity->id, ret);
		}
	}

	t7xx_dev->rgu_pci_irq_en = true;
	t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	pm_runtime_mark_last_busy(&pdev->dev);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

	return ret;
}

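/* Mask PCIe MAC interrupts in the noirq resume phase; __t7xx_pci_pm_resume()
 * re-enables them once the device state has been evaluated.
 */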
static int t7xx_pci_pm_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct t7xx_pci_dev *t7xx_dev;

	t7xx_dev = pci_get_drvdata(pdev);
	t7xx_pcie_mac_interrupts_dis(t7xx_dev);

	return 0;
}

static void t7xx_pci_shutdown(struct pci_dev *pdev)
{
	__t7xx_pci_pm_suspend(pdev);
}

static int t7xx_pci_pm_suspend(struct device *dev)
{
	return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_resume(struct device *dev)
{
	return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}

static int t7xx_pci_pm_thaw(struct device *dev)
{
	return __t7xx_pci_pm_resume(to_pci_dev(dev), false);
}

static int t7xx_pci_pm_runtime_suspend(struct device *dev)
{
	return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_runtime_resume(struct device *dev)
{
	return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}

static const struct dev_pm_ops t7xx_pci_pm_ops = {
	.suspend = t7xx_pci_pm_suspend,
	.resume = t7xx_pci_pm_resume,
	.resume_noirq = t7xx_pci_pm_resume_noirq,
	.freeze = t7xx_pci_pm_suspend,
	.thaw = t7xx_pci_pm_thaw,
	.poweroff = t7xx_pci_pm_suspend,
	.restore = t7xx_pci_pm_resume,
	.restore_noirq = t7xx_pci_pm_resume_noirq,
	.runtime_suspend = t7xx_pci_pm_runtime_suspend,
	.runtime_resume = t7xx_pci_pm_runtime_resume
};

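/* Request one threaded IRQ per populated MSI-X vector slot; on failure, free
 * the IRQs that were already requested.
 */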
static int t7xx_request_irq(struct pci_dev *pdev)
{
	struct t7xx_pci_dev *t7xx_dev;
	int ret = 0, i;

	t7xx_dev = pci_get_drvdata(pdev);

	for (i = 0; i < EXT_INT_NUM; i++) {
		const char *irq_descr;
		int irq_vec;

		if (!t7xx_dev->intr_handler[i])
			continue;

		irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
					   dev_driver_string(&pdev->dev), i);
		if (!irq_descr) {
			ret = -ENOMEM;
			break;
		}

		irq_vec = pci_irq_vector(pdev, i);
		ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i],
					   t7xx_dev->intr_thread[i], 0, irq_descr,
					   t7xx_dev->callback_param[i]);
		if (ret) {
			dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret);
			break;
		}
	}

	if (ret) {
		while (i--) {
			if (!t7xx_dev->intr_handler[i])
				continue;

			free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
		}
	}

	return ret;
}

static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev)
{
	struct pci_dev *pdev = t7xx_dev->pdev;
	int ret;

	/* Only using 6 interrupts, but HW-design requires power-of-2 IRQs allocation */
	ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret);
		return ret;
	}

	ret = t7xx_request_irq(pdev);
	if (ret) {
		pci_free_irq_vectors(pdev);
		return ret;
	}

	t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);
	return 0;
}

static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev)
{
	int ret, i;

	if (!t7xx_dev->pdev->msix_cap)
		return -EINVAL;

	ret = t7xx_setup_msix(t7xx_dev);
	if (ret)
		return ret;

	/* IPs enable interrupts when ready */
	for (i = 0; i < EXT_INT_NUM; i++)
		t7xx_pcie_mac_set_int(t7xx_dev, i);

	return 0;
}

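/* Derive the host-visible infracfg_ao base address by translating the
 * device-side INFRACFG_AO_DEV_CHIP address into the external register BAR.
 */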
static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev)
{
	t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base +
					       INFRACFG_AO_DEV_CHIP -
					       t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
}

static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct t7xx_pci_dev *t7xx_dev;
	int ret;

	t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);
	if (!t7xx_dev)
		return -ENOMEM;

	pci_set_drvdata(pdev, t7xx_dev);
	t7xx_dev->pdev = pdev;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE),
				 pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "Could not request BARs: %d\n", ret);
		return -ENOMEM;
	}

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret);
		return ret;
	}

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret);
		return ret;
	}

	IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE];
	t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];

	ret = t7xx_pci_pm_init(t7xx_dev);
	if (ret)
		return ret;

	t7xx_pcie_mac_atr_init(t7xx_dev);
	t7xx_pci_infracfg_ao_calc(t7xx_dev);
	t7xx_mhccif_init(t7xx_dev);

	ret = t7xx_md_init(t7xx_dev);
	if (ret)
		return ret;

	t7xx_pcie_mac_interrupts_dis(t7xx_dev);

	ret = t7xx_interrupt_init(t7xx_dev);
	if (ret) {
		t7xx_md_exit(t7xx_dev);
		return ret;
	}

	t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
	t7xx_pcie_mac_interrupts_en(t7xx_dev);

	return 0;
}

static void t7xx_pci_remove(struct pci_dev *pdev)
{
	struct t7xx_pci_dev *t7xx_dev;
	int i;

	t7xx_dev = pci_get_drvdata(pdev);
	t7xx_md_exit(t7xx_dev);

	for (i = 0; i < EXT_INT_NUM; i++) {
		if (!t7xx_dev->intr_handler[i])
			continue;

		free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
	}

	pci_free_irq_vectors(t7xx_dev->pdev);
}

static const struct pci_device_id t7xx_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) },
	{ }
};
MODULE_DEVICE_TABLE(pci, t7xx_pci_table);

static struct pci_driver t7xx_pci_driver = {
	.name = "mtk_t7xx",
	.id_table = t7xx_pci_table,
	.probe = t7xx_pci_probe,
	.remove = t7xx_pci_remove,
	.driver.pm = &t7xx_pci_pm_ops,
	.shutdown = t7xx_pci_shutdown,
};

module_pci_driver(t7xx_pci_driver);

MODULE_AUTHOR("MediaTek Inc");
MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver");
MODULE_LICENSE("GPL");