#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_cldma.h"
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define RT_ID_MD_PORT_ENUM	0

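/* Feature query identification pattern: 0x49434343 is ASCII "ICCC" */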
#define MD_FEATURE_QUERY_ID	0x49434343

#define FEATURE_VER		GENMASK(7, 4)
#define FEATURE_MSK		GENMASK(3, 0)

#define RGU_RESET_DELAY_MS	10
#define PORT_RESET_DELAY_MS	2000
#define EX_HS_TIMEOUT_MS	5000
#define EX_HS_POLL_DELAY_MS	10

enum mtk_feature_support_type {
	MTK_FEATURE_DOES_NOT_EXIST,
	MTK_FEATURE_NOT_SUPPORTED,
	MTK_FEATURE_MUST_BE_SUPPORTED,
};

static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev)
{
	return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK;
}

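/**
 * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts.
 * @t7xx_dev: MTK device.
 *
 * Collect the D2H interrupt status and, depending on the current FSM and modem
 * state, forward exception or port-enumeration IRQs to the FSM, or queue the
 * asynchronous handshake work.
 *
 * Return: 0 on success, -EINVAL if the FSM control block is not yet set up.
 */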
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md = t7xx_dev->md;
	struct t7xx_fsm_ctl *ctl;
	unsigned int int_sta;
	int ret = 0;
	u32 mask;

	ctl = md->fsm_ctl;
	if (!ctl) {
		dev_err_ratelimited(&t7xx_dev->pdev->dev,
				    "MHCCIF interrupt received before initializing MD monitor\n");
		return -EINVAL;
	}

	spin_lock_bh(&md->exp_lock);
	int_sta = t7xx_get_interrupt_status(t7xx_dev);
	md->exp_id |= int_sta;
	if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
		if (ctl->md_state == MD_STATE_INVALID ||
		    ctl->md_state == MD_STATE_WAITING_FOR_HS1 ||
		    ctl->md_state == MD_STATE_WAITING_FOR_HS2 ||
		    ctl->md_state == MD_STATE_READY) {
			md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
			ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX);
		}
	} else if (md->exp_id & D2H_INT_PORT_ENUM) {
		md->exp_id &= ~D2H_INT_PORT_ENUM;

		if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START ||
		    ctl->curr_state == FSM_STATE_STOPPED)
			ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM);
	} else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) {
		mask = t7xx_mhccif_mask_get(t7xx_dev);
		if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) {
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
			queue_work(md->handshake_wq, &md->handshake_work);
		}
	}
	spin_unlock_bh(&md->exp_lock);

	return ret;
}

static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr;
	void __iomem *reset_pcie_reg;
	u32 val;

	reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA -
			 pbase_addr->pcie_dev_reg_trsl_addr;
	val = ioread32(reset_pcie_reg);
	iowrite32(val, reset_pcie_reg);
}

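/* Acknowledge the RGU (reset generation unit) interrupt at the device level
 * first, then clear the corresponding PCIe MAC interrupt status.
 */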
void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev)
{
	t7xx_clr_device_irq_via_pcie(t7xx_dev);
	t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
}

static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
{
#ifdef CONFIG_ACPI
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct device *dev = &t7xx_dev->pdev->dev;
	acpi_status acpi_ret;
	acpi_handle handle;

	handle = ACPI_HANDLE(dev);
	if (!handle) {
		dev_err(dev, "ACPI handle not found\n");
		return -EFAULT;
	}

	if (!acpi_has_method(handle, fn_name)) {
		dev_err(dev, "%s method not found\n", fn_name);
		return -EFAULT;
	}

	acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer);
	if (ACPI_FAILURE(acpi_ret)) {
		dev_err(dev, "%s method fail: %s\n", fn_name, acpi_format_exception(acpi_ret));
		return -EFAULT;
	}

	kfree(buffer.pointer);	/* Free the return object allocated by ACPICA */
#endif
	return 0;
}

int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev)
{
	return t7xx_acpi_reset(t7xx_dev, "_RST");
}

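/* Reset the device through ACPI, using the reset type advertised in the
 * modem's MISC device status register: MRST._RST for a PLDR-type reset,
 * otherwise _RST for an FLDR-type reset.
 */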
static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
{
	u32 val;

	val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
	if (val & MISC_RESET_TYPE_PLDR)
		t7xx_acpi_reset(t7xx_dev, "MRST._RST");
	else if (val & MISC_RESET_TYPE_FLDR)
		t7xx_acpi_fldr_func(t7xx_dev);
}

static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
{
	struct t7xx_pci_dev *t7xx_dev = data;

	msleep(RGU_RESET_DELAY_MS);
	t7xx_reset_device_via_pmic(t7xx_dev);
	return IRQ_HANDLED;
}

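/* Hard-IRQ half of the RGU handler: acknowledge the interrupt, and only wake
 * the threaded handler when RGU handling is enabled.
 */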
static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data)
{
	struct t7xx_pci_dev *t7xx_dev = data;
	struct t7xx_modem *modem;

	t7xx_clear_rgu_irq(t7xx_dev);
	if (!t7xx_dev->rgu_pci_irq_en)
		return IRQ_HANDLED;

	modem = t7xx_dev->md;
	modem->rgu_irq_asserted = true;
	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
	return IRQ_WAKE_THREAD;
}

static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev)
{
	/* Mask the RGU interrupt and clear any stale status before installing the handlers */
	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
	t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);

	t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler;
	t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread;
	t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev;
	t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
}
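
/*
 * CLDMA handling for each stage of the modem exception flow:
 * HIF_EX_INIT:         stop and clear all TX queues.
 * HIF_EX_CLEARQ_DONE:  stop the RX queues and the CLDMA engine, reset the
 *                      MD CLDMA hardware, then clear all RX queues.
 * HIF_EX_ALLQ_RESET:   re-initialize the CLDMA hardware and restart it.
 */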
static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage)
{
	switch (stage) {
	case HIF_EX_INIT:
		t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX);
		t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX);
		break;

	case HIF_EX_CLEARQ_DONE:
		/* Stop RX and the CLDMA engine before resetting the hardware,
		 * so no CLDMA interrupts fire while the queues are cleared.
		 */
		t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX);
		t7xx_cldma_stop(md_ctrl);

		if (md_ctrl->hif_id == CLDMA_ID_MD)
			t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base);

		t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX);
		break;

	case HIF_EX_ALLQ_RESET:
		t7xx_cldma_hw_init(&md_ctrl->hw_info);
		t7xx_cldma_start(md_ctrl);
		break;

	default:
		break;
	}
}

static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
{
	struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev;

	if (stage == HIF_EX_CLEARQ_DONE) {
		/* Give in-flight port data time to drain before resetting the ports */
		msleep(PORT_RESET_DELAY_MS);
		t7xx_port_proxy_reset(md->port_prox);
	}

	t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);

	if (stage == HIF_EX_INIT)
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
	else if (stage == HIF_EX_CLEARQ_DONE)
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK);
}

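/* Poll md->exp_id for the given exception-handshake event bit, giving up after
 * EX_HS_TIMEOUT_MS.
 */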
static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id)
{
	unsigned int waited_time_ms = 0;

	do {
		if (md->exp_id & event_id)
			return 0;

		waited_time_ms += EX_HS_POLL_DELAY_MS;
		msleep(EX_HS_POLL_DELAY_MS);
	} while (waited_time_ms < EX_HS_TIMEOUT_MS);

	return -EFAULT;
}

static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev)
{
	/* Mask all MHCCIF D2H interrupts, then unmask only port enumeration */
	t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
	t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM);

	/* Enable and register the RGU interrupt used for device reset notification */
	t7xx_dev->rgu_pci_irq_en = true;
	t7xx_pcie_register_rgu_isr(t7xx_dev);
}

struct feature_query {
	__le32 head_pattern;
	u8 feature_set[FEATURE_COUNT];
	__le32 tail_pattern;
};

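/* Build the HS1 feature-query message from the host feature set and send it to
 * the device over the control port.
 */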
static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core)
{
	struct feature_query *ft_query;
	struct sk_buff *skb;

	skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query));
	if (!skb)
		return;

	ft_query = skb_put(skb, sizeof(*ft_query));
	ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
	memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT);
	ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);

	/* Send the HS1 message to the device */
	t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0);
}

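/* Validate the feature query received from the device and answer it with the
 * host runtime data (HS3): one mtk_runtime_feature entry for every feature the
 * device does not flag as must-be-supported.
 */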
static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev,
				       void *data)
{
	struct feature_query *md_feature = data;
	struct mtk_runtime_feature *rt_feature;
	unsigned int i, rt_data_len = 0;
	struct sk_buff *skb;

	/* Parse the modem's runtime data query */
	if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID ||
	    le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) {
		dev_err(dev, "Invalid feature pattern: head 0x%x, tail 0x%x\n",
			le32_to_cpu(md_feature->head_pattern),
			le32_to_cpu(md_feature->tail_pattern));
		return -EINVAL;
	}

	for (i = 0; i < FEATURE_COUNT; i++) {
		if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) !=
		    MTK_FEATURE_MUST_BE_SUPPORTED)
			rt_data_len += sizeof(*rt_feature);
	}

	skb = t7xx_ctrl_alloc_skb(rt_data_len);
	if (!skb)
		return -ENOMEM;

	rt_feature = skb_put(skb, rt_data_len);
	memset(rt_feature, 0, rt_data_len);

	/* Fill in one runtime feature entry per feature that is not mandatory */
	for (i = 0; i < FEATURE_COUNT; i++) {
		u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]);

		if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED)
			continue;

		rt_feature->feature_id = i;
		if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST)
			rt_feature->support_info = md_feature->feature_set[i];

		rt_feature++;
	}

	/* Send the HS3 message to the device */
	t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0);
	return 0;
}

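/* Walk the runtime feature entries returned by the device and verify that every
 * feature the host marks as must-be-supported is indeed supported; port
 * enumeration data is forwarded to the port layer.
 */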
static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core,
				   struct device *dev, void *data, int data_length)
{
	enum mtk_feature_support_type ft_spt_st, ft_spt_cfg;
	struct mtk_runtime_feature *rt_feature;
	int i, offset;

	offset = sizeof(struct feature_query);
	for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) {
		rt_feature = data + offset;
		offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len);

		ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]);
		if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED)
			continue;

		ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info);
		if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
			return -EINVAL;

		if (i == RT_ID_MD_PORT_ENUM)
			t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
	}

	return 0;
}

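/* Mark the modem core as not ready and, if a handshake is in flight, ask the
 * handshake handler to exit by queueing an HS2_EXIT event.
 */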
static int t7xx_core_reset(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	md->core_md.ready = false;

	if (!ctl) {
		dev_err(dev, "FSM is not initialized\n");
		return -EINVAL;
	}

	if (md->core_md.handshake_ongoing) {
		int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);

		if (ret)
			return ret;
	}

	md->core_md.handshake_ongoing = false;
	return 0;
}

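/* Core handshake handler: send the HS1 query, wait for the HS2 (or error)
 * event from the FSM, parse the device runtime data, reply with HS3 and mark
 * the core as ready.
 */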
static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl,
				 enum t7xx_fsm_event_state event_id,
				 enum t7xx_fsm_event_state err_detect)
{
	struct t7xx_fsm_event *event = NULL, *event_next;
	struct t7xx_sys_info *core_info = &md->core_md;
	struct device *dev = &md->t7xx_dev->pdev->dev;
	unsigned long flags;
	int ret;

	t7xx_prepare_host_rt_data_query(core_info);

	while (!kthread_should_stop()) {
		bool event_received = false;

		spin_lock_irqsave(&ctl->event_lock, flags);
		list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) {
			if (event->event_id == err_detect) {
				list_del(&event->entry);
				spin_unlock_irqrestore(&ctl->event_lock, flags);
				dev_err(dev, "Core handshake error event received\n");
				goto err_free_event;
			} else if (event->event_id == event_id) {
				list_del(&event->entry);
				event_received = true;
				break;
			}
		}
		spin_unlock_irqrestore(&ctl->event_lock, flags);

		if (event_received)
			break;

		wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			goto err_free_event;
	}

	if (!event || ctl->exp_flg)
		goto err_free_event;

	ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length);
	if (ret) {
		dev_err(dev, "Host failure parsing runtime data: %d\n", ret);
		goto err_free_event;
	}

	if (ctl->exp_flg)
		goto err_free_event;

	ret = t7xx_prepare_device_rt_data(core_info, dev, event->data);
	if (ret) {
		dev_err(dev, "Device failure parsing runtime data: %d\n", ret);
		goto err_free_event;
	}

	core_info->ready = true;
	core_info->handshake_ongoing = false;
	wake_up(&ctl->async_hk_wq);
err_free_event:
	kfree(event);
}

static void t7xx_md_hk_wq(struct work_struct *work)
{
	struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work);
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	/* Clear any stale HS2_EXIT event queued by a previous core reset */
	t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
	md->core_md.handshake_ongoing = true;
	t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
}

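/* Adjust MHCCIF interrupt masks and handshake scheduling for the FSM event
 * being entered (FSM_PRE_START, FSM_START, FSM_READY).
 */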
void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	void __iomem *mhccif_base;
	unsigned int int_sta;
	unsigned long flags;

	switch (evt_id) {
	case FSM_PRE_START:
		t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM);
		break;

	case FSM_START:
		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM);

		spin_lock_irqsave(&md->exp_lock, flags);
		int_sta = t7xx_get_interrupt_status(md->t7xx_dev);
		md->exp_id |= int_sta;
		if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
			ctl->exp_flg = true;
			md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
		} else if (ctl->exp_flg) {
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
		} else if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
			queue_work(md->handshake_wq, &md->handshake_work);
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
			mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
			iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
			t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
		} else {
			t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
		}
		spin_unlock_irqrestore(&md->exp_lock, flags);

		t7xx_mhccif_mask_clr(md->t7xx_dev,
				     D2H_INT_EXCEPTION_INIT |
				     D2H_INT_EXCEPTION_INIT_DONE |
				     D2H_INT_EXCEPTION_CLEARQ_DONE |
				     D2H_INT_EXCEPTION_ALLQ_RESET);
		break;

	case FSM_READY:
		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
		break;

	default:
		break;
	}
}

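/* Drive the staged exception handshake with the device: after each stage is
 * signalled, wait for the matching acknowledgment interrupt before moving on
 * to the next stage.
 */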
void t7xx_md_exception_handshake(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	int ret;

	t7xx_md_exception(md, HIF_EX_INIT);
	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE);
	if (ret)
		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE);

	t7xx_md_exception(md, HIF_EX_INIT_DONE);
	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE);
	if (ret)
		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE);

	t7xx_md_exception(md, HIF_EX_CLEARQ_DONE);
	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET);
	if (ret)
		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET);

	t7xx_md_exception(md, HIF_EX_ALLQ_RESET);
}

static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct t7xx_modem *md;

	md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL);
	if (!md)
		return NULL;

	md->t7xx_dev = t7xx_dev;
	t7xx_dev->md = md;
	spin_lock_init(&md->exp_lock);
	md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI,
					   0, "md_hk_wq");
	if (!md->handshake_wq)
		return NULL;

	INIT_WORK(&md->handshake_work, t7xx_md_hk_wq);
	md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
	md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
		FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
	return md;
}

int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md = t7xx_dev->md;

	md->md_init_finish = false;
	md->exp_id = 0;
	t7xx_fsm_reset(md);
	t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_port_proxy_reset(md->port_prox);
	md->md_init_finish = true;
	return t7xx_core_reset(md);
}

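/**
 * t7xx_md_init() - Initialize the modem.
 * @t7xx_dev: MTK device.
 *
 * Allocate the modem control block and bring up its sub-modules: CLDMA, FSM,
 * CCMNI network devices and the port proxy, then queue the FSM start command
 * and set up the software interrupts.
 *
 * Return: 0 on success, or a negative error code from a failed
 * sub-initialization (resources already set up are released on failure).
 */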
int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md;
	int ret;

	md = t7xx_md_alloc(t7xx_dev);
	if (!md)
		return -ENOMEM;

	ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
	if (ret)
		goto err_destroy_hswq;

	ret = t7xx_fsm_init(md);
	if (ret)
		goto err_destroy_hswq;

	ret = t7xx_ccmni_init(t7xx_dev);
	if (ret)
		goto err_uninit_fsm;

	ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]);
	if (ret)
		goto err_uninit_ccmni;

	ret = t7xx_port_proxy_init(md);
	if (ret)
		goto err_uninit_md_cldma;

	ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
	if (ret)
		goto err_uninit_proxy;

	t7xx_md_sys_sw_init(t7xx_dev);
	md->md_init_finish = true;
	return 0;

err_uninit_proxy:
	t7xx_port_proxy_uninit(md->port_prox);

err_uninit_md_cldma:
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);

err_uninit_ccmni:
	t7xx_ccmni_exit(t7xx_dev);

err_uninit_fsm:
	t7xx_fsm_uninit(md);

err_destroy_hswq:
	destroy_workqueue(md->handshake_wq);
	dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n");
	return ret;
}

void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md = t7xx_dev->md;

	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);

	if (!md->md_init_finish)
		return;

	t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
	t7xx_port_proxy_uninit(md->port_prox);
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_ccmni_exit(t7xx_dev);
	t7xx_fsm_uninit(md);
	destroy_workqueue(md->handshake_wq);
}