0001
0002
0003
0004
0005 #include <linux/devcoredump.h>
0006
0007 #include "cam.h"
0008 #include "debug.h"
0009 #include "fw.h"
0010 #include "mac.h"
0011 #include "ps.h"
0012 #include "reg.h"
0013 #include "ser.h"
0014 #include "util.h"
0015
/* Max time (ms) to wait for mac80211 re-configuration after an L2 reset
 * before giving up (see SER_EV_L2_RECFG_TIMEOUT handling).
 */
#define SER_RECFG_TIMEOUT 1000
0017
/* Events consumed by the SER (system error recovery) state machine.
 * Most are produced from FW/MAC error codes in rtw89_ser_notify();
 * STATE_IN/STATE_OUT are synthetic events delivered by ser_state_goto()
 * on every transition, and the *_TIMEOUT events come from the alarm work.
 */
enum ser_evt {
	SER_EV_NONE,
	SER_EV_STATE_IN,	/* entering a state */
	SER_EV_STATE_OUT,	/* leaving a state */
	SER_EV_L1_RESET,	/* start L1 (trx) recovery */
	SER_EV_DO_RECOVERY,	/* fw mode-3: DMAC disabled, do HCI recovery */
	SER_EV_MAC_RESET_DONE,	/* fw mode-5: L1 recovery finished */
	SER_EV_L2_RESET,	/* full restart required */
	SER_EV_L2_RECFG_DONE,	/* mac80211 finished re-configuration */
	SER_EV_L2_RECFG_TIMEOUT,
	SER_EV_M3_TIMEOUT,	/* fw did not answer m2 in time */
	SER_EV_FW_M5_TIMEOUT,	/* fw did not answer m4 in time */
	SER_EV_L0_RESET,	/* recovered by fw/hw alone; informational */
	SER_EV_MAXX
};
0033
/* States of the SER state machine; each has a handler in ser_st_tbl. */
enum ser_state {
	SER_IDLE_ST,		/* normal operation, waiting for errors */
	SER_RESET_TRX_ST,	/* L1: trx stopped, waiting for fw m3 */
	SER_DO_HCI_ST,		/* L1: HCI recovery, waiting for fw m5 */
	SER_L2_RESET_ST,	/* full teardown + mac80211 restart */
	SER_ST_MAX_ST
};
0041
/* One queued SER event, linked on rtw89_ser::msg_q and consumed by
 * rtw89_ser_hdl_work().  Allocated GFP_ATOMIC in ser_send_msg().
 */
struct ser_msg {
	struct list_head list;
	u8 event;	/* enum ser_evt value */
};
0046
/* Per-state entry of the state table: id, printable name and handler. */
struct state_ent {
	u8 state;	/* enum ser_state value */
	char *name;
	void (*st_func)(struct rtw89_ser *ser, u8 event);
};
0052
/* Per-event entry of the event table: id and printable name. */
struct event_ent {
	u8 event;	/* enum ser_evt value */
	char *name;
};
0057
0058 static char *ser_ev_name(struct rtw89_ser *ser, u8 event)
0059 {
0060 if (event < SER_EV_MAXX)
0061 return ser->ev_tbl[event].name;
0062
0063 return "err_ev_name";
0064 }
0065
0066 static char *ser_st_name(struct rtw89_ser *ser)
0067 {
0068 if (ser->state < SER_ST_MAX_ST)
0069 return ser->st_tbl[ser->state].name;
0070
0071 return "err_st_name";
0072 }
0073
/* RTW89_DEF_SER_CD_TYPE() - define one section of the SER core dump.
 *
 * Expands to a packed struct "ser_cd_<_name>" carrying a type tag
 * (enum rtw89_ser_cd_type), the payload size, a fixed padding marker
 * and _size bytes of dump data, plus an init helper filling the header.
 * NOTE(review): the 0x0123456789abcdef marker is presumably recognized
 * by the tool that parses the devcoredump — confirm before changing.
 */
#define RTW89_DEF_SER_CD_TYPE(_name, _type, _size) \
struct ser_cd_ ## _name { \
	u32 type; \
	u32 type_size; \
	u64 padding; \
	u8 data[_size]; \
} __packed; \
static void ser_cd_ ## _name ## _init(struct ser_cd_ ## _name *p) \
{ \
	p->type = _type; \
	p->type_size = sizeof(p->data); \
	p->padding = 0x0123456789abcdef; \
}
0087
/* Type tags written into the core-dump section headers. */
enum rtw89_ser_cd_type {
	RTW89_SER_CD_FW_RSVD_PLE = 0,
	RTW89_SER_CD_FW_BACKTRACE = 1,
};

/* Section: FW reserved payload-engine memory dump. */
RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple,
		      RTW89_SER_CD_FW_RSVD_PLE,
		      RTW89_FW_RSVD_PLE_SIZE);

/* Section: FW call-stack backtrace dump. */
RTW89_DEF_SER_CD_TYPE(fw_backtrace,
		      RTW89_SER_CD_FW_BACKTRACE,
		      RTW89_FW_BACKTRACE_MAX_SIZE);
0100
/* Full core-dump image handed to devcoredump: both sections laid out
 * back to back, packed so the on-disk format is stable.
 */
struct rtw89_ser_cd_buffer {
	struct ser_cd_fw_rsvd_ple fwple;
	struct ser_cd_fw_backtrace fwbt;
} __packed;
0105
0106 static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev)
0107 {
0108 struct rtw89_ser_cd_buffer *buf;
0109
0110 buf = vzalloc(sizeof(*buf));
0111 if (!buf)
0112 return NULL;
0113
0114 ser_cd_fw_rsvd_ple_init(&buf->fwple);
0115 ser_cd_fw_backtrace_init(&buf->fwbt);
0116
0117 return buf;
0118 }
0119
/* Hand the core dump to the devcoredump framework.
 *
 * NOTE: dev_coredumpv() takes ownership of @buf and frees it with
 * vfree() when the coredump device is released, so the caller must not
 * free it after this call (hence the free_self flag in
 * rtw89_ser_cd_free()).
 */
static void rtw89_ser_cd_send(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf)
{
	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER sends core dump\n");

	dev_coredumpv(rtwdev->dev, buf, sizeof(*buf), GFP_KERNEL);
}
0132
/* Free the core-dump buffer, but only when @free_self is set, i.e. when
 * the buffer was never handed to dev_coredumpv() (which would otherwise
 * own and free it).  Safe for @buf == NULL since vfree(NULL) is a no-op.
 */
static void rtw89_ser_cd_free(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf, bool free_self)
{
	if (!free_self)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER frees core dump by self\n");

	vfree(buf);
}
0147
/* Dispatch one event to the current state's handler.
 * Leaves LPS (low power state) first so the device is awake for any
 * register access done by the handler.
 */
static void ser_state_run(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
		    ser_st_name(ser), ser_ev_name(ser, evt));

	rtw89_leave_lps(rtwdev);
	ser->st_tbl[ser->state].st_func(ser, evt);
}
0158
/* Transition the state machine to @new_state.
 *
 * Delivers SER_EV_STATE_OUT to the old state before switching and
 * SER_EV_STATE_IN to the new state after, so each handler can tear
 * down (cancel alarms, resume trx, ...) and set up around the change.
 * Self-transitions and out-of-range targets are ignored.
 */
static void ser_state_goto(struct rtw89_ser *ser, u8 new_state)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (ser->state == new_state || new_state >= SER_ST_MAX_ST)
		return;
	ser_state_run(ser, SER_EV_STATE_OUT);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s goto -> %s\n",
		    ser_st_name(ser), ser->st_tbl[new_state].name);

	ser->state = new_state;
	ser_state_run(ser, SER_EV_STATE_IN);
}
0173
0174 static struct ser_msg *__rtw89_ser_dequeue_msg(struct rtw89_ser *ser)
0175 {
0176 struct ser_msg *msg;
0177
0178 spin_lock_irq(&ser->msg_q_lock);
0179 msg = list_first_entry_or_null(&ser->msg_q, struct ser_msg, list);
0180 if (msg)
0181 list_del(&msg->list);
0182 spin_unlock_irq(&ser->msg_q_lock);
0183
0184 return msg;
0185 }
0186
0187 static void rtw89_ser_hdl_work(struct work_struct *work)
0188 {
0189 struct ser_msg *msg;
0190 struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
0191 ser_hdl_work);
0192
0193 while ((msg = __rtw89_ser_dequeue_msg(ser))) {
0194 ser_state_run(ser, msg->event);
0195 kfree(msg);
0196 }
0197 }
0198
0199 static int ser_send_msg(struct rtw89_ser *ser, u8 event)
0200 {
0201 struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
0202 struct ser_msg *msg = NULL;
0203
0204 if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
0205 return -EIO;
0206
0207 msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
0208 if (!msg)
0209 return -ENOMEM;
0210
0211 msg->event = event;
0212
0213 spin_lock_irq(&ser->msg_q_lock);
0214 list_add(&msg->list, &ser->msg_q);
0215 spin_unlock_irq(&ser->msg_q_lock);
0216
0217 ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
0218 return 0;
0219 }
0220
/* Delayed-work body of the SER alarm: fire the armed timeout event into
 * the message queue, then disarm.
 */
static void rtw89_ser_alarm_work(struct work_struct *work)
{
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_alarm_work.work);

	ser_send_msg(ser, ser->alarm_event);
	ser->alarm_event = SER_EV_NONE;
}
0229
/* Arm a single-shot alarm: deliver @event to the state machine after
 * @ms milliseconds unless ser_del_alarm() cancels it first.  Only one
 * alarm is tracked at a time (alarm_event is simply overwritten).
 * No-op while SER is being torn down.
 */
static void ser_set_alarm(struct rtw89_ser *ser, u32 ms, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return;

	ser->alarm_event = event;
	ieee80211_queue_delayed_work(rtwdev->hw, &ser->ser_alarm_work,
				     msecs_to_jiffies(ms));
}
0241
/* Cancel a pending alarm (if any) and clear the armed event. */
static void ser_del_alarm(struct rtw89_ser *ser)
{
	cancel_delayed_work(&ser->ser_alarm_work);
	ser->alarm_event = SER_EV_NONE;
}
0247
0248
/* Stop mac80211 tx queues and remember it via STOP_TX so
 * drv_resume_tx() only wakes what we actually stopped.
 */
static void drv_stop_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	ieee80211_stop_queues(rtwdev->hw);
	set_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}
0256
/* Stop rx processing by clearing the device RUNNING flag, and remember
 * it via STOP_RX so drv_resume_rx() can undo exactly this.
 */
static void drv_stop_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	set_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}
0264
/* Reset the HCI layer's trx paths; tx/rx must already be stopped. */
static void drv_trx_reset(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_hci_reset(rtwdev);
}
0271
0272 static void drv_resume_tx(struct rtw89_ser *ser)
0273 {
0274 struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
0275
0276 if (!test_bit(RTW89_SER_DRV_STOP_TX, ser->flags))
0277 return;
0278
0279 ieee80211_wake_queues(rtwdev->hw);
0280 clear_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
0281 }
0282
0283 static void drv_resume_rx(struct rtw89_ser *ser)
0284 {
0285 struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
0286
0287 if (!test_bit(RTW89_SER_DRV_STOP_RX, ser->flags))
0288 return;
0289
0290 set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
0291 clear_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
0292 }
0293
/* Return a vif to its pristine state: release its hw port and clear
 * link type/trigger, ready for mac80211 to reconfigure it.
 */
static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
	rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
	rtwvif->trigger = false;
}
0300
/* Station iterator: tear down per-sta CAM entries.
 * Address CAMs are per-sta only in AP mode and for TDLS peers; TDLS
 * peers additionally own their own BSSID CAM entry.
 */
static void ser_sta_deinit_addr_cam_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;

	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
		rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
	if (sta->tdls)
		rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);
}
0312
/* Tear down all CAM state of one vif: first the per-station entries
 * (atomic sta iteration), then the vif-level CAM itself.
 */
static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  ser_sta_deinit_addr_cam_iter,
					  rtwvif);

	rtw89_cam_deinit(rtwdev, rtwvif);
}
0321
/* Drop every software binding to the MAC before an L2 restart: reset
 * all keys and CAM entries, release every mac_id, and reset each vif to
 * the no-link state.
 */
static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
{
	struct rtw89_vif *rtwvif;

	rtw89_cam_reset_keys(rtwdev);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_deinit_cam(rtwdev, rtwvif);

	rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_reset_vif(rtwdev, rtwvif);
}
0334
0335
/* Re-enable HCI DMA (LV1 recovery step 2), undoing hal_stop_dma().
 * Returns 0 if DMA was never stopped or on success, -EIO when the HCI
 * backend provides no mac_lv1_rcvy op, else the op's error.  Clears
 * HAL_STOP_DMA only on success so a retry remains possible.
 */
static int hal_enable_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!test_bit(RTW89_SER_HAL_STOP_DMA, ser->flags))
		return 0;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2);
	if (!ret)
		clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}
0353
/* Stop HCI DMA (LV1 recovery step 1).  On success sets HAL_STOP_DMA so
 * hal_enable_dma() knows there is something to undo.  Returns -EIO when
 * the HCI backend provides no mac_lv1_rcvy op, else the op's result.
 */
static int hal_stop_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1);
	if (!ret)
		set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}
0368
/* Send the m2 event: ask FW to disable the DMAC (L1 recovery).  FW is
 * expected to answer with m3 (SER_EV_DO_RECOVERY via rtw89_ser_notify).
 */
static void hal_send_m2_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_DISABLE_EN);
}
0375
/* Send the m4 event: ask FW to run L1 recovery.  FW is expected to
 * answer with m5 (SER_EV_MAC_RESET_DONE via rtw89_ser_notify).
 */
static void hal_send_m4_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN);
}
0382
0383
/* IDLE state: normal operation, waiting for an error to start recovery.
 * STATE_IN signals the HCI layer that recovery is complete; STATE_OUT
 * (i.e. leaving IDLE for any recovery state) signals that it starts.
 */
static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		rtw89_hci_recovery_complete(rtwdev);
		break;
	case SER_EV_L1_RESET:
		ser_state_goto(ser, SER_RESET_TRX_ST);
		break;
	case SER_EV_L2_RESET:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		rtw89_hci_recovery_start(rtwdev);
		break;
	default:
		break;
	}
}
0405
/* RESET_TRX state (L1 recovery, first half): stop trx, reset the HCI
 * paths and ask FW to disable the DMAC.  Any failure or timeout
 * escalates to a full L2 reset.  STATE_OUT undoes everything in
 * reverse order (alarm, DMA, rx, tx).
 */
static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		drv_stop_tx(ser);

		/* LV1 step 1 failed; fall back to a full L2 reset */
		if (hal_stop_dma(ser)) {
			ser_state_goto(ser, SER_L2_RESET_ST);
			break;
		}

		drv_stop_rx(ser);
		drv_trx_reset(ser);

		/* ask FW to disable DMAC; FW answers with m3 (DO_RECOVERY) */
		hal_send_m2_event(ser);

		/* escalate to L2 if FW does not answer m2 in time */
		ser_set_alarm(ser, 1000, SER_EV_M3_TIMEOUT);
		break;

	case SER_EV_DO_RECOVERY:
		ser_state_goto(ser, SER_DO_HCI_ST);
		break;

	case SER_EV_M3_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		hal_enable_dma(ser);
		drv_resume_rx(ser);
		drv_resume_tx(ser);
		break;

	default:
		break;
	}
}
0446
/* DO_HCI state (L1 recovery, second half): ask FW to run the actual
 * recovery (m4) and wait for its completion (m5, MAC_RESET_DONE) back
 * to IDLE.  A timeout escalates to a full L2 reset.
 */
static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		/* ask FW to perform L1 recovery; FW answers with m5 */
		hal_send_m4_event(ser);

		/* escalate to L2 if FW does not answer m4 in time */
		ser_set_alarm(ser, 1000, SER_EV_FW_M5_TIMEOUT);
		break;

	case SER_EV_FW_M5_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_MAC_RESET_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}
0474
/* Dump @len bytes of MAC memory region @sel, starting at @start_addr,
 * into @buf.
 *
 * MAC memory is read through an indirect-access window: the page base
 * address is written to R_AX_FILTER_MODEL_ADDR, then 32-bit words are
 * read from the R_AX_INDIR_ACCESS_ENTRY aperture.  @residue skips into
 * the first page when start_addr is not page aligned; subsequent pages
 * are read from their beginning.
 *
 * NOTE(review): assumes @buf is 4-byte aligned and can hold @len
 * rounded up to a multiple of 4 bytes — confirm at call sites.
 */
static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf,
			     u8 sel, u32 start_addr, u32 len)
{
	u32 *ptr = (u32 *)buf;
	u32 base_addr, start_page, residue;
	u32 cnt = 0;
	u32 i;

	start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
	residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
	base_addr = rtw89_mac_mem_base_addrs[sel];
	base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;

	while (cnt < len) {
		/* select the page to expose through the indirect window */
		rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr);

		for (i = R_AX_INDIR_ACCESS_ENTRY + residue;
		     i < R_AX_INDIR_ACCESS_ENTRY + MAC_MEM_DUMP_PAGE_SIZE;
		     i += 4, ptr++) {
			*ptr = rtw89_read32(rtwdev, i);
			cnt += 4;
			if (cnt >= len)
				break;
		}

		/* only the first page may start mid-page */
		residue = 0;
		base_addr += MAC_MEM_DUMP_PAGE_SIZE;
	}
}
0504
/* Dump the FW reserved payload-engine area (chip-specific offset inside
 * the shared buffer) into @buf; @buf must hold RTW89_FW_RSVD_PLE_SIZE.
 */
static void rtw89_ser_fw_rsvd_ple_dump(struct rtw89_dev *rtwdev, u8 *buf)
{
	u32 start_addr = rtwdev->chip->rsvd_ple_ofst;

	rtw89_debug(rtwdev, RTW89_DBG_SER,
		    "dump mem for fw rsvd payload engine (start addr: 0x%x)\n",
		    start_addr);
	ser_mac_mem_dump(rtwdev, buf, RTW89_MAC_MEM_SHARED_BUF, start_addr,
			 RTW89_FW_RSVD_PLE_SIZE);
}
0515
/* Backtrace descriptor placed by FW at the start of the rsvd PLE area
 * (see ser_l2_reset_st_pre_hdl): WCPU-space address, total size and a
 * validation key of the backtrace buffer.
 */
struct __fw_backtrace_entry {
	u32 wcpu_addr;
	u32 size;
	u32 key;	/* must equal RTW89_FW_BACKTRACE_KEY to be valid */
} __packed;

/* One backtrace frame: return address and stack pointer. */
struct __fw_backtrace_info {
	u32 ra;
	u32 sp;
} __packed;

static_assert(RTW89_FW_BACKTRACE_INFO_SIZE ==
	      sizeof(struct __fw_backtrace_info));
0529
/* Copy the FW backtrace described by @ent into @buf.
 *
 * Validates the descriptor (non-zero address after rebasing from WCPU
 * address space, matching key, sane frame-aligned size) and then reads
 * (ra, sp) pairs through the same indirect-access window used by
 * ser_mac_mem_dump().  Returns 0 on success or -EINVAL when the
 * descriptor is invalid.  @buf must hold RTW89_FW_BACKTRACE_MAX_SIZE.
 */
static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
				       const struct __fw_backtrace_entry *ent)
{
	struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf;
	u32 fwbt_addr = ent->wcpu_addr - RTW89_WCPU_BASE_ADDR;
	u32 fwbt_size = ent->size;
	u32 fwbt_key = ent->key;
	u32 i;

	if (fwbt_addr == 0) {
		rtw89_warn(rtwdev, "FW backtrace invalid address: 0x%x\n",
			   fwbt_addr);
		return -EINVAL;
	}

	if (fwbt_key != RTW89_FW_BACKTRACE_KEY) {
		rtw89_warn(rtwdev, "FW backtrace invalid key: 0x%x\n",
			   fwbt_key);
		return -EINVAL;
	}

	if (fwbt_size == 0 || !RTW89_VALID_FW_BACKTRACE_SIZE(fwbt_size) ||
	    fwbt_size > RTW89_FW_BACKTRACE_MAX_SIZE) {
		rtw89_warn(rtwdev, "FW backtrace invalid size: 0x%x\n",
			   fwbt_size);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n");
	/* expose the backtrace buffer through the indirect window */
	rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, fwbt_addr);

	for (i = R_AX_INDIR_ACCESS_ENTRY;
	     i < R_AX_INDIR_ACCESS_ENTRY + fwbt_size;
	     i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) {
		*ptr = (struct __fw_backtrace_info){
			.ra = rtw89_read32(rtwdev, i),
			.sp = rtw89_read32(rtwdev, i + 4),
		};
		rtw89_debug(rtwdev, RTW89_DBG_SER,
			    "next sp: 0x%x, next ra: 0x%x\n",
			    ptr->sp, ptr->ra);
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace end\n");
	return 0;
}
0576
/* Pre-handling of an L2 reset: capture a devcoredump (FW rsvd PLE plus
 * FW backtrace), then drop every MAC binding and stop the core so
 * mac80211 can restart the hardware from scratch.  Caller holds
 * rtwdev->mutex.
 */
static void ser_l2_reset_st_pre_hdl(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct rtw89_ser_cd_buffer *buf;
	struct __fw_backtrace_entry fwbt_ent;
	int ret = 0;

	buf = rtw89_ser_cd_prep(rtwdev);
	if (!buf) {
		ret = -ENOMEM;
		goto bottom;
	}

	rtw89_ser_fw_rsvd_ple_dump(rtwdev, buf->fwple.data);

	/* FW puts the backtrace descriptor at the start of the PLE dump */
	fwbt_ent = *(struct __fw_backtrace_entry *)buf->fwple.data;
	ret = rtw89_ser_fw_backtrace_dump(rtwdev, buf->fwbt.data, &fwbt_ent);
	if (ret)
		goto bottom;

	rtw89_ser_cd_send(rtwdev, buf);

bottom:
	/* on failure the dump was never handed to devcoredump: free here */
	rtw89_ser_cd_free(rtwdev, buf, !!ret);

	ser_reset_mac_binding(rtwdev);
	rtw89_core_stop(rtwdev);
	INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
}
0606
/* L2_RESET state: full teardown, then let mac80211 restart the hw.
 * Waits up to SER_RECFG_TIMEOUT for the re-configuration to complete
 * (RECFG_DONE from rtw89_ser_recfg_done()) before returning to IDLE
 * regardless of outcome.
 */
static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		mutex_lock(&rtwdev->mutex);
		ser_l2_reset_st_pre_hdl(ser);
		mutex_unlock(&rtwdev->mutex);

		ieee80211_restart_hw(rtwdev->hw);
		ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
		break;

	case SER_EV_L2_RECFG_TIMEOUT:
		rtw89_info(rtwdev, "Err: ser L2 re-config timeout\n");
		fallthrough;
	case SER_EV_L2_RECFG_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		clear_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}
0637
/* Printable names for every SER event, indexed by enum ser_evt
 * (ser_ev_name() relies on this ordering).
 */
static const struct event_ent ser_ev_tbl[] = {
	{SER_EV_NONE, "SER_EV_NONE"},
	{SER_EV_STATE_IN, "SER_EV_STATE_IN"},
	{SER_EV_STATE_OUT, "SER_EV_STATE_OUT"},
	{SER_EV_L1_RESET, "SER_EV_L1_RESET"},
	{SER_EV_DO_RECOVERY, "SER_EV_DO_RECOVERY m3"},
	{SER_EV_MAC_RESET_DONE, "SER_EV_MAC_RESET_DONE m5"},
	{SER_EV_L2_RESET, "SER_EV_L2_RESET"},
	{SER_EV_L2_RECFG_DONE, "SER_EV_L2_RECFG_DONE"},
	{SER_EV_L2_RECFG_TIMEOUT, "SER_EV_L2_RECFG_TIMEOUT"},
	{SER_EV_M3_TIMEOUT, "SER_EV_M3_TIMEOUT"},
	{SER_EV_FW_M5_TIMEOUT, "SER_EV_FW_M5_TIMEOUT"},
	{SER_EV_L0_RESET, "SER_EV_L0_RESET"},
	{SER_EV_MAXX, "SER_EV_MAX"}
};
0653
/* State table, indexed by enum ser_state (ser_state_run() and
 * ser_st_name() rely on this ordering).
 */
static const struct state_ent ser_st_tbl[] = {
	{SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl},
	{SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl},
	{SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl},
	{SER_L2_RESET_ST, "SER_L2_RESET_ST", ser_l2_reset_st_hdl}
};
0660
/* Initialize the SER state machine (queue, lock, works, tables) in the
 * IDLE state.  Always returns 0.
 */
int rtw89_ser_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = &rtwdev->ser;

	memset(ser, 0, sizeof(*ser));
	INIT_LIST_HEAD(&ser->msg_q);
	ser->state = SER_IDLE_ST;
	ser->st_tbl = ser_st_tbl;
	ser->ev_tbl = ser_ev_tbl;

	/* redundant after the memset above, but keeps the intent explicit */
	bitmap_zero(ser->flags, RTW89_NUM_OF_SER_FLAGS);
	spin_lock_init(&ser->msg_q_lock);
	INIT_WORK(&ser->ser_hdl_work, rtw89_ser_hdl_work);
	INIT_DELAYED_WORK(&ser->ser_alarm_work, rtw89_ser_alarm_work);
	return 0;
}
0677
/* Shut down SER processing: STOP_RUN makes ser_send_msg()/ser_set_alarm()
 * refuse new work while the pending work items are flushed synchronously.
 * Always returns 0.
 */
int rtw89_ser_deinit(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = (struct rtw89_ser *)&rtwdev->ser;

	set_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	cancel_delayed_work_sync(&ser->ser_alarm_work);
	cancel_work_sync(&ser->ser_hdl_work);
	clear_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	return 0;
}
0688
/* Called when mac80211 re-configuration after an L2 reset has finished;
 * feeds RECFG_DONE into the state machine (see ser_l2_reset_st_hdl()).
 */
void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev)
{
	ser_send_msg(&rtwdev->ser, SER_EV_L2_RECFG_DONE);
}
0693
/* Entry point for FW/MAC error reports: map the MAC error code @err to
 * a SER event and queue it.  Returns -EINVAL for unrecognized codes,
 * else the result of queueing the message (0 on success).
 */
int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
{
	u8 event = SER_EV_NONE;

	rtw89_info(rtwdev, "SER catches error: 0x%x\n", err);

	switch (err) {
	case MAC_AX_ERR_L1_ERR_DMAC:
	case MAC_AX_ERR_L0_PROMOTE_TO_L1:
		event = SER_EV_L1_RESET;
		break;
	case MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE:
		event = SER_EV_DO_RECOVERY;	/* m3 */
		break;
	case MAC_AX_ERR_L1_RESET_RECOVERY_DONE:
		event = SER_EV_MAC_RESET_DONE;	/* m5 */
		break;
	case MAC_AX_ERR_L0_ERR_CMAC0:
	case MAC_AX_ERR_L0_ERR_CMAC1:
	case MAC_AX_ERR_L0_RESET_DONE:
		event = SER_EV_L0_RESET;
		break;
	default:
		/* the L2 error codes form a contiguous range */
		if (err == MAC_AX_ERR_L1_PROMOTE_TO_L2 ||
		    (err >= MAC_AX_ERR_L2_ERR_AH_DMA &&
		     err <= MAC_AX_GET_ERR_MAX))
			event = SER_EV_L2_RESET;
		break;
	}

	if (event == SER_EV_NONE) {
		rtw89_warn(rtwdev, "SER cannot recognize error: 0x%x\n", err);
		return -EINVAL;
	}

	ser_send_msg(&rtwdev->ser, event);
	return 0;
}
EXPORT_SYMBOL(rtw89_ser_notify);