// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *  Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
 */
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define FSM_DRM_DISABLE_DELAY_MS 200
#define FSM_EVENT_POLL_INTERVAL_MS 20
#define FSM_MD_EX_REC_OK_TIMEOUT_MS 10000
#define FSM_MD_EX_PASS_TIMEOUT_MS 45000
#define FSM_CMD_TIMEOUT_MS 2000

void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_add_tail(&notifier->entry, &ctl->notifier_list);
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_notifier *notifier_cur, *notifier_next;
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) {
		if (notifier_cur == notifier)
			list_del(&notifier_cur->entry);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

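/*
 * Invoke each registered notifier with the new state. The notifier lock
 * is dropped around every callback so that notifier_fn may sleep; the
 * walk resumes from the current node once the lock is re-taken.
 */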
static void fsm_state_notify(struct t7xx_modem *md, enum md_state state)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	struct t7xx_fsm_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry(notifier, &ctl->notifier_list, entry) {
		spin_unlock_irqrestore(&ctl->notifier_lock, flags);
		if (notifier->notifier_fn)
			notifier->notifier_fn(state, notifier->data);

		spin_lock_irqsave(&ctl->notifier_lock, flags);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
{
	ctl->md_state = state;

	/* Update to port first, otherwise sending message on HS2 may fail */
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state);
	fsm_state_notify(ctl->md, state);
}

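/*
 * Finalize a command: report the result and wake the submitter if it is
 * waiting (FSM_CMD_FLAG_WAIT_FOR_COMPLETION), then free the command.
 * cmd->ret and cmd->done are only valid for waited-on commands.
 */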
static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
{
	if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		*cmd->ret = result;
		complete_all(cmd->done);
	}

	kfree(cmd);
}

static void fsm_del_kf_event(struct t7xx_fsm_event *event)
{
	list_del(&event->entry);
	kfree(event);
}

static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event, *evt_next;
	struct t7xx_fsm_command *cmd, *cmd_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) {
		dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id);
		list_del(&cmd->entry);
		fsm_finish_command(ctl, cmd, -EINVAL);
	}
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		dev_warn(dev, "Unhandled event %d\n", event->event_id);
		fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

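/*
 * Poll the head of the event queue until @event_expected arrives, also
 * consuming any @event_ignore events seen first. Each unsuccessful poll
 * sleeps FSM_EVENT_POLL_INTERVAL_MS, so @retries bounds the total wait;
 * an unrelated event at the head is left in place and re-polled.
 */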
static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected,
			       enum t7xx_fsm_event_state event_ignore, int retries)
{
	struct t7xx_fsm_event *event;
	bool event_received = false;
	unsigned long flags;
	int cnt = 0;

	while (cnt++ < retries && !event_received) {
		bool sleep_required = true;

		if (kthread_should_stop())
			return;

		spin_lock_irqsave(&ctl->event_lock, flags);
		event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry);
		if (event) {
			event_received = event->event_id == event_expected;
			if (event_received || event->event_id == event_ignore) {
				fsm_del_kf_event(event);
				sleep_required = false;
			}
		}
		spin_unlock_irqrestore(&ctl->event_lock, flags);

		if (sleep_required)
			msleep(FSM_EVENT_POLL_INTERVAL_MS);
	}
}

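/*
 * Exception handling routine. Only meaningful in the READY or STARTING
 * states; otherwise the command (if any) fails with -EINVAL. A CCIF
 * exception event broadcasts MD_STATE_EXCEPTION, notifies PM, runs the
 * exception handshake and waits for the modem to report EX_REC_OK and
 * then EX_PASS before the command is finished.
 */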
static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd,
				  enum t7xx_ex_reason reason)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;

	if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) {
		if (cmd)
			fsm_finish_command(ctl, cmd, -EINVAL);

		return;
	}

	ctl->curr_state = FSM_STATE_EXCEPTION;

	switch (reason) {
	case EXCEPTION_HS_TIMEOUT:
		dev_err(dev, "Boot Handshake failure\n");
		break;

	case EXCEPTION_EVENT:
		dev_err(dev, "Exception event\n");
		t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION);
		t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev);
		t7xx_md_exception_handshake(ctl->md);

		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX,
				   FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID,
				   FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		break;

	default:
		dev_err(dev, "Exception %d\n", reason);
		break;
	}

	if (cmd)
		fsm_finish_command(ctl, cmd, 0);
}

static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
	ctl->curr_state = FSM_STATE_STOPPED;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED);
	return t7xx_md_reset(ctl->md->t7xx_dev);
}

static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	if (ctl->curr_state == FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

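/*
 * Graceful stop: halt CLDMA, then, unless the RGU interrupt already
 * signalled a device reset, disable DRM on the device, wait
 * FSM_DRM_DISABLE_DELAY_MS and try an ACPI function-level reset,
 * falling back to an MHCCIF-triggered device reset if that fails.
 */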
static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct t7xx_pci_dev *t7xx_dev;
	struct cldma_ctrl *md_ctrl;
	int err;

	if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
	t7xx_dev = ctl->md->t7xx_dev;

	ctl->curr_state = FSM_STATE_STOPPING;
	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
	t7xx_cldma_stop(md_ctrl);

	if (!ctl->md->rgu_irq_asserted) {
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);

		/* Wait for the DRM disable to take effect */
		msleep(FSM_DRM_DISABLE_DELAY_MS);

		err = t7xx_acpi_fldr_func(t7xx_dev);
		if (err)
			t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
	}

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl->md_state != MD_STATE_WAITING_FOR_HS2)
		return;

	ctl->md_state = MD_STATE_READY;

	fsm_state_notify(ctl->md, MD_STATE_READY);
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY);
}

static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;

	ctl->curr_state = FSM_STATE_READY;
	t7xx_fsm_broadcast_ready_state(ctl);
	t7xx_md_event_notify(md, FSM_READY);
}

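/*
 * First-stage boot handshake: broadcast MD_STATE_WAITING_FOR_HS1 and
 * block (up to 60 seconds) until the core modem reports ready or an
 * exception is flagged. On timeout, ask a still-running handshake
 * worker to exit via FSM_EVENT_MD_HS2_EXIT and enter the exception
 * routine with EXCEPTION_HS_TIMEOUT.
 */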
static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;
	struct device *dev;

	ctl->curr_state = FSM_STATE_STARTING;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
	t7xx_md_event_notify(md, FSM_START);

	wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg,
					 HZ * 60);
	dev = &md->t7xx_dev->pdev->dev;

	if (ctl->exp_flg)
		dev_err(dev, "MD exception is captured during handshake\n");

	if (!md->core_md.ready) {
		dev_err(dev, "MD handshake timeout\n");
		if (md->core_md.handshake_ongoing)
			t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);

		fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
		return -ETIMEDOUT;
	}

	t7xx_pci_pm_init_late(md->t7xx_dev);
	fsm_routine_ready(ctl);
	return 0;
}

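/*
 * Start the modem from the INIT, PRE_START or STOPPED state: poll the
 * PCIe MISC device-status register (every 20 ms, up to 2 s) for the
 * Linux stage, then initialize the CLDMA HW and run the starting
 * routine.
 */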
static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct t7xx_modem *md = ctl->md;
	u32 dev_status;
	int ret;

	if (!md)
		return;

	if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START &&
	    ctl->curr_state != FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	ctl->curr_state = FSM_STATE_PRE_START;
	t7xx_md_event_notify(md, FSM_PRE_START);

	ret = read_poll_timeout(ioread32, dev_status,
				(dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000,
				false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
	if (ret) {
		struct device *dev = &md->t7xx_dev->pdev->dev;

		fsm_finish_command(ctl, cmd, -ETIMEDOUT);
		dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK);
		return;
	}

	t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
	fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
}

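/*
 * FSM worker thread: sleep until a command is queued or the thread is
 * asked to stop, then dequeue and dispatch one command at a time. An
 * unknown command ID fails the command and flushes both queues.
 */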
static int fsm_main_thread(void *data)
{
	struct t7xx_fsm_ctl *ctl = data;
	struct t7xx_fsm_command *cmd;
	unsigned long flags;

	while (!kthread_should_stop()) {
		if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) ||
					     kthread_should_stop()))
			continue;

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&ctl->command_lock, flags);
		cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry);
		list_del(&cmd->entry);
		spin_unlock_irqrestore(&ctl->command_lock, flags);

		switch (cmd->cmd_id) {
		case FSM_CMD_START:
			fsm_routine_start(ctl, cmd);
			break;

		case FSM_CMD_EXCEPTION:
			fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag));
			break;

		case FSM_CMD_PRE_STOP:
			fsm_routine_stopping(ctl, cmd);
			break;

		case FSM_CMD_STOP:
			fsm_routine_stopped(ctl, cmd);
			break;

		default:
			fsm_finish_command(ctl, cmd, -EINVAL);
			fsm_flush_event_cmd_qs(ctl);
			break;
		}
	}

	return 0;
}

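/*
 * Allocate and queue a command for the FSM thread, using GFP_ATOMIC when
 * submitted from interrupt context. With FSM_CMD_FLAG_WAIT_FOR_COMPLETION
 * the caller blocks for up to FSM_CMD_TIMEOUT_MS and receives the
 * command's result; otherwise the call returns once the command is queued.
 */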
int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct t7xx_fsm_command *cmd;
	unsigned long flags;
	int ret;

	cmd = kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? GFP_ATOMIC : GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->entry);
	cmd->cmd_id = cmd_id;
	cmd->flag = flag;
	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		cmd->done = &done;
		cmd->ret = &ret;
	}

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_add_tail(&cmd->entry, &ctl->command_queue);
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	wake_up(&ctl->command_wq);

	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		unsigned long wait_ret;

		wait_ret = wait_for_completion_timeout(&done,
						       msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
		if (!wait_ret)
			return -ETIMEDOUT;

		return ret;
	}

	return 0;
}

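/*
 * Queue an FSM event, copying the optional payload into the allocation
 * tail, and wake all event-queue waiters. IDs outside the valid range
 * (FSM_EVENT_INVALID, FSM_EVENT_MAX) are rejected with -EINVAL.
 */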
int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id,
			  unsigned char *data, unsigned int length)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event;
	unsigned long flags;

	if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) {
		dev_err(dev, "Invalid event %d\n", event_id);
		return -EINVAL;
	}

	event = kmalloc(sizeof(*event) + length, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	INIT_LIST_HEAD(&event->entry);
	event->event_id = event_id;
	event->length = length;

	if (data && length)
		memcpy(event->data, data, length);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_add_tail(&event->entry, &ctl->event_queue);
	spin_unlock_irqrestore(&ctl->event_lock, flags);

	wake_up_all(&ctl->event_wq);
	return 0;
}

void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id)
{
	struct t7xx_fsm_event *event, *evt_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		if (event->event_id == event_id)
			fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->md_state;

	return MD_STATE_INVALID;
}

unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->curr_state;

	return FSM_STATE_STOPPED;
}

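/*
 * Interrupt entry point: a port-enumeration IRQ queues FSM_CMD_START,
 * while a CCIF exception sets exp_flg, wakes the handshake waiter and
 * queues FSM_CMD_EXCEPTION with the reason encoded in the command flags.
 */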
int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type)
{
	unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT;

	if (type == MD_IRQ_PORT_ENUM) {
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags);
	} else if (type == MD_IRQ_CCIF_EX) {
		ctl->exp_flg = true;
		wake_up(&ctl->async_hk_wq);
		cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT);
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags);
	}

	return -EINVAL;
}

void t7xx_fsm_reset(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	fsm_flush_event_cmd_qs(ctl);
	ctl->curr_state = FSM_STATE_STOPPED;
	ctl->exp_flg = false;
}

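/*
 * Allocate the FSM control block, initialize its queues, wait queues and
 * locks, and start the "t7xx_fsm" worker thread. Returns a negative
 * errno if the thread cannot be created.
 */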
int t7xx_fsm_init(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_ctl *ctl;

	ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	md->fsm_ctl = ctl;
	ctl->md = md;
	ctl->curr_state = FSM_STATE_INIT;
	INIT_LIST_HEAD(&ctl->command_queue);
	INIT_LIST_HEAD(&ctl->event_queue);
	init_waitqueue_head(&ctl->async_hk_wq);
	init_waitqueue_head(&ctl->event_wq);
	INIT_LIST_HEAD(&ctl->notifier_list);
	init_waitqueue_head(&ctl->command_wq);
	spin_lock_init(&ctl->event_lock);
	spin_lock_init(&ctl->command_lock);
	ctl->exp_flg = false;
	spin_lock_init(&ctl->notifier_lock);

	ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm");
	return PTR_ERR_OR_ZERO(ctl->fsm_thread);
}

void t7xx_fsm_uninit(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	if (!ctl)
		return;

	if (ctl->fsm_thread)
		kthread_stop(ctl->fsm_thread);

	fsm_flush_event_cmd_qs(ctl);
}