// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_flash.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_trace.h"
#include "iosm_ipc_debugfs.h"

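/* Check if the channel refers to the IP MUX WWAN channel. */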
static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
{
	if (chnl)
		return chnl->ctype == IPC_CTYPE_WWAN &&
		       chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
	return false;
}

static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
{
	union ipc_msg_prep_args prep_args = {
		.sleep.target = 1,
		.sleep.state = state,
	};

	ipc_imem->device_sleep = state;

	return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					IPC_MSG_PREP_SLEEP, &prep_args, NULL);
}

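/* Allocate a downlink buffer and prepare a transfer descriptor for the pipe. */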
static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
				  struct ipc_pipe *pipe)
{
	/* limit max. nr of entries */
	if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
		return false;

	return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
}

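/* Timer tasklet: retry the DL buffer allocation for starved pipes and ring
 * the doorbell if new transfer descriptors could be prepared.
 */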
static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
				      void *msg, size_t size)
{
	bool new_buffers_available = false;
	bool retry_allocation = false;
	int i;

	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;

		if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
			continue;

		while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
			new_buffers_available = true;

		if (pipe->nr_of_queued_entries == 0)
			retry_allocation = true;
	}

	if (new_buffers_available)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
			ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
	return 0;
}

static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, td_alloc_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
				 0, false);
	return HRTIMER_NORESTART;
}

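/* Fast update timer tasklet: trigger the doorbell irq to inform CP about
 * pending transfer descriptors.
 */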
static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					    void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_FAST_TD_UPD_TMR);

	return 0;
}

static enum hrtimer_restart
ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, fast_update_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

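/* ADB timer tasklet: finish the pending aggregated data block in the MUX. */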
static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	ipc_mux_ul_adb_finish(ipc_imem->mux);
	return 0;
}

static enum hrtimer_restart
ipc_imem_adb_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, adb_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

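/* Derive the MUX configuration from the CP capabilities read via MMIO. */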
static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
					  struct ipc_mux_config *cfg)
{
	ipc_mmio_update_cp_capability(ipc_imem->mmio);

	if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) {
		dev_err(ipc_imem->dev, "Failed to get Mux capability.");
		return -EINVAL;
	}

	cfg->protocol = ipc_imem->mmio->mux_protocol;

	cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
		       MUX_UL_ON_CREDITS :
		       MUX_UL;

	/* The instance ID is the same as the channel ID because it is
	 * reused by the channel alloc function.
	 */
	cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;

	return 0;
}

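/**
 * ipc_imem_msg_send_feature_set - Send feature set message to modem
 * @ipc_imem:		Pointer to imem data-struct
 * @reset_enable:	0 = out-of-band, 1 = in-band-crash notification
 * @atomic_ctx:		if enabled, send the message in tasklet context
 */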
void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
				   unsigned int reset_enable, bool atomic_ctx)
{
	union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
						      reset_enable };

	if (atomic_ctx)
		ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					 IPC_MSG_PREP_FEATURE_SET, &prep_args,
					 NULL);
	else
		ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				      IPC_MSG_PREP_FEATURE_SET, &prep_args);
}

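/**
 * ipc_imem_td_update_timer_start - Start the TD update timer in the runtime
 *				    phase, otherwise trigger the doorbell irq
 *				    on CP directly.
 * @ipc_imem:	Pointer to imem data-struct
 */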
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
	/* Use the TD update timer only in the runtime phase */
	if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
		/* trigger the doorbell irq on CP directly. */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_TD_UPD_TMR_START);
		return;
	}

	if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
		ipc_imem->hrtimer_period =
			ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
{
	if (hrtimer_active(hr_timer))
		hrtimer_cancel(hr_timer);
}

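/**
 * ipc_imem_adb_timer_start - Start the ADB timer if not already running.
 * @ipc_imem:	Pointer to imem data-struct
 */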
void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem)
{
	if (!hrtimer_active(&ipc_imem->adb_timer)) {
		ipc_imem->hrtimer_period =
			ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC);
		hrtimer_start(&ipc_imem->adb_timer,
			      ipc_imem->hrtimer_period,
			      HRTIMER_MODE_REL);
	}
}

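/**
 * ipc_imem_ul_write_td - Pass the channel UL lists to the protocol layer for
 *			  TD preparation and submission to the device.
 * @ipc_imem:	Pointer to imem data-struct
 *
 * Returns: true if a doorbell trigger is still pending for IP data.
 */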
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	bool hpda_ctrl_pending = false;
	struct sk_buff_head *ul_list;
	bool hpda_pending = false;
	struct ipc_pipe *pipe;
	int i;

	/* Analyze the uplink pipe of all active channels. */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];

		if (channel->state != IMEM_CHANNEL_ACTIVE)
			continue;

		pipe = &channel->ul_pipe;

		/* Get the reference to the skbuf accumulator list. */
		ul_list = &channel->ul_list;

		/* Fill the transfer descriptors with the uplink buffer info. */
		if (!ipc_imem_check_wwan_ips(channel)) {
			hpda_ctrl_pending |=
				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);
		} else {
			hpda_pending |=
				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);
		}
	}

	/* Trigger the doorbell irq directly for pending ctrl TDs. */
	if (hpda_ctrl_pending) {
		hpda_pending = false;
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_UL_WRITE_TD);
	}

	return hpda_pending;
}

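/**
 * ipc_imem_ipc_init_check - Send the INIT event to CP, wait for the IPC state
 *			     change and then request the RUNNING state.
 * @ipc_imem:	Pointer to imem data-struct
 */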
void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_BOOT_TIMEOUT;

	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

	/* Trigger the CP interrupt to enter the init state. */
	ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
			  IPC_MEM_DEVICE_IPC_INIT);
	/* Wait for the CP update. */
	do {
		if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		    ipc_imem->ipc_requested_state) {
			/* Prepare the MMIO space */
			ipc_mmio_config(ipc_imem->mmio);

			/* Trigger the CP irq to enter the running state. */
			ipc_imem->ipc_requested_state =
				IPC_MEM_DEVICE_IPC_RUNNING;
			ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
					  IPC_MEM_DEVICE_IPC_RUNNING);

			return;
		}
		msleep(20);
	} while (--timeout);

	/* timeout */
	dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
		ipc_imem_phase_get_string(ipc_imem->phase),
		ipc_mmio_get_ipc_state(ipc_imem->mmio));

	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
}

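/* Analyze the packet type and distribute it to the upper layers. */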
static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
				    struct ipc_pipe *pipe, struct sk_buff *skb)
{
	u16 port_id;

	if (!skb)
		return;

	/* An AT/control or IP packet is expected. */
	switch (pipe->channel->ctype) {
	case IPC_CTYPE_CTRL:
		port_id = pipe->channel->channel_id;
		ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
				    IPC_CB(skb)->mapping,
				    IPC_CB(skb)->direction);
		if (port_id == IPC_MEM_CTRL_CHL_ID_7)
			ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
						       skb);
		else if (ipc_is_trace_channel(ipc_imem, port_id))
			ipc_trace_port_rx(ipc_imem, skb);
		else
			wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
				     skb);
		break;

	case IPC_CTYPE_WWAN:
		if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
			ipc_mux_dl_decode(ipc_imem->mux, skb);
		break;
	default:
		dev_err(ipc_imem->dev, "Invalid channel type");
		break;
	}
}

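/* Process the downlink pipe: hand over completed TDs and refill buffers. */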
static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	s32 cnt = 0, processed_td_cnt = 0;
	struct ipc_mem_channel *channel;
	u32 head = 0, tail = 0;
	bool processed = false;
	struct sk_buff *skb;

	channel = pipe->channel;

	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);
	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	processed_td_cnt = cnt;

	/* Seek for pipes with pending DL data. */
	while (cnt--) {
		skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);

		/* Analyze the packet type and distribute it. */
		ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
	}

	/* Try to allocate new empty DL SKbs from head..tail - 1 */
	while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
		processed = true;

	if (processed && !ipc_imem_check_wwan_ips(channel)) {
		/* Force HP update for non IP channels */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);
		processed = false;

		/* If Fast Update timer is already running then stop */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}

	/* Any control channel process will get immediate HP update.
	 * Start the fast update timer only for the IP channel if all TDs
	 * were used in the last pass.
	 */
	if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
		ipc_imem->hrtimer_period =
			ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		hrtimer_start(&ipc_imem->fast_update_timer,
			      ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
	}

	if (ipc_imem->app_notify_dl_pend)
		complete(&ipc_imem->dl_pend_sem);
}

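/* Process the uplink pipe: release or recycle the confirmed UL buffers. */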
static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	struct ipc_mem_channel *channel;
	u32 tail = 0, head = 0;
	struct sk_buff *skb;
	s32 cnt = 0;

	channel = pipe->channel;

	/* Get the current head and tail index of the pipe. */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);

	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	/* Free UL buffers. */
	while (cnt--) {
		skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);

		if (!skb)
			continue;

		/* If the user app was suspended in uplink direction - blocking
		 * write, resume it.
		 */
		if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
			complete(&channel->ul_sem);

		/* Free the skbuf element. */
		if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
			if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
				ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
			else
				dev_err(ipc_imem->dev,
					"OP Type is UL_MUX, unknown if_id %d",
					channel->if_id);
		} else {
			ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
		}
	}

	/* Restart the MUX TX flow if it had been stopped (IP channel only). */
	if (ipc_imem_check_wwan_ips(pipe->channel))
		ipc_mux_check_n_restart_tx(ipc_imem->mux);

	if (ipc_imem->app_notify_ul_pend)
		complete(&ipc_imem->ul_pend_sem);
}

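/* CP ROM irq: latch the ROM exit code and wake up the blocked devlink app. */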
static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
	ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
	complete(&channel->ul_sem);
}

/* Execute the TD update timer actions, generating the doorbell irq. */
static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					  void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_TD_UPD_TMR);
	return 0;
}

/* Consider link power management in the runtime phase. */
static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
{
	/* link will go down, test pending UL packets. */
	if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
	    hrtimer_active(&ipc_imem->tdupdate_timer)) {
		/* Generate the doorbell irq. */
		ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
		/* Stop the TD update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
		/* Stop the fast update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}
}

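/* Startup timer tasklet: drive the IPC handshake (UNINIT -> INIT -> RUNNING)
 * during boot.
 */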
static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
					void *msg, size_t size)
{
	/* Update & check the current operation phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
		return -EIO;

	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
	    IPC_MEM_DEVICE_IPC_UNINIT) {
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_INIT);

		ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
		/* reduce period to 100 ms to check for mmio init state */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	} else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		   IPC_MEM_DEVICE_IPC_INIT) {
		/* Startup complete - disable timer */
		ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);

		/* Prepare the MMIO space and registers. */
		ipc_mmio_config(ipc_imem->mmio);
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_RUNNING);
	}

	return 0;
}

static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
{
	enum hrtimer_restart result = HRTIMER_NORESTART;
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, startup_timer);

	if (ktime_to_ns(ipc_imem->hrtimer_period)) {
		hrtimer_forward_now(&ipc_imem->startup_timer,
				    ipc_imem->hrtimer_period);
		result = HRTIMER_RESTART;
	}

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
				 NULL, 0, false);
	return result;
}

/* Get the CP execution stage. */
static enum ipc_mem_exec_stage
ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
{
	return (ipc_imem->phase == IPC_P_RUN &&
		ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
		       ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
		       ipc_mmio_get_exec_stage(ipc_imem->mmio);
}

/* Callback to send the modem ready uevent. */
static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	enum ipc_mem_exec_stage exec_stage =
		ipc_imem_get_exec_stage_buffered(ipc_imem);

	if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
		ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);

	return 0;
}

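/* This worker runs in process context, as the creation and removal of the
 * port/devlink devices can't be done from the tasklet.
 */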
static void ipc_imem_run_state_worker(struct work_struct *instance)
{
	struct ipc_chnl_cfg chnl_cfg_port = { 0 };
	struct ipc_mux_config mux_cfg;
	struct iosm_imem *ipc_imem;
	u8 ctrl_chl_idx = 0;

	ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);

	if (ipc_imem->phase != IPC_P_RUN) {
		dev_err(ipc_imem->dev,
			"Modem link down. Exit run state worker.");
		return;
	}

	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
		ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);

	ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
	if (ipc_imem->mux)
		ipc_imem->mux->wwan = ipc_imem->wwan;

	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
			ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
			if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
			    chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
				ctrl_chl_idx++;
				continue;
			}
			if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
				ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
						      chnl_cfg_port,
						      IRQ_MOD_OFF);
				ipc_imem->ipc_port[ctrl_chl_idx] =
					ipc_port_init(ipc_imem, chnl_cfg_port);
			}
		}
		ctrl_chl_idx++;
	}

	ipc_debugfs_init(ipc_imem);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
				 false);

	/* Complete all memory stores before setting bit */
	smp_mb__before_atomic();

	set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);

	/* Complete all memory stores after setting bit */
	smp_mb__after_atomic();
}

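/* Central irq handler: update the phase state machine and process the message
 * ring as well as all open uplink/downlink pipes.
 */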
static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
{
	enum ipc_mem_device_ipc_state curr_ipc_status;
	enum ipc_phase old_phase, phase;
	bool retry_allocation = false;
	bool ul_pending = false;
	int i;

	if (irq != IMEM_IRQ_DONT_CARE)
		ipc_imem->ev_irq_pending[irq] = false;

	/* Get the internal phase. */
	old_phase = ipc_imem->phase;

	if (old_phase == IPC_P_OFF_REQ) {
		dev_dbg(ipc_imem->dev,
			"[%s]: Ignoring MSI. Deinit sequence in progress!",
			ipc_imem_phase_get_string(old_phase));
		return;
	}

	/* Update the phase controlled by CP. */
	phase = ipc_imem_phase_update(ipc_imem);

	switch (phase) {
	case IPC_P_RUN:
		if (!ipc_imem->enter_runtime) {
			/* Execute the transition from flash/boot to runtime. */
			ipc_imem->enter_runtime = 1;

			/* allow the device to sleep, default value is
			 * IPC_HOST_SLEEP_ENTER_SLEEP
			 */
			ipc_imem_msg_send_device_sleep(ipc_imem,
						       ipc_imem->device_sleep);

			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      true);
		}

		curr_ipc_status =
			ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);

		/* check ipc_status change */
		if (ipc_imem->ipc_status != curr_ipc_status) {
			ipc_imem->ipc_status = curr_ipc_status;

			if (ipc_imem->ipc_status ==
			    IPC_MEM_DEVICE_IPC_RUNNING) {
				schedule_work(&ipc_imem->run_state_worker);
			}
		}

		/* Consider power management in the runtime phase. */
		ipc_imem_slp_control_exec(ipc_imem);
		break; /* Continue with skbuf processing. */

		/* Unexpected phases. */
	case IPC_P_OFF:
	case IPC_P_OFF_REQ:
		dev_err(ipc_imem->dev, "confused phase %s",
			ipc_imem_phase_get_string(phase));
		return;

	case IPC_P_PSI:
		if (old_phase != IPC_P_ROM)
			break;

		fallthrough;
		/* On CP the PSI phase is already active. */

	case IPC_P_ROM:
		/* Before the CP ROM driver starts the PSI image, it sets
		 * the exit_code field on the doorbell scratchpad and
		 * triggers the irq.
		 */
		ipc_imem_rom_irq_exec(ipc_imem);
		return;

	default:
		break;
	}

	/* process message ring */
	ipc_protocol_msg_process(ipc_imem, irq);

	/* process all open pipes */
	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
		struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;

		if (dl_pipe->is_open &&
		    (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
			ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);

			if (dl_pipe->nr_of_queued_entries == 0)
				retry_allocation = true;
		}

		if (ul_pipe->is_open)
			ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
	}

	/* Try to generate new ADB or ADGH. */
	if (ipc_mux_ul_data_encode(ipc_imem->mux)) {
		ipc_imem_td_update_timer_start(ipc_imem);
		if (ipc_imem->mux->protocol == MUX_AGGREGATION)
			ipc_imem_adb_timer_start(ipc_imem);
	}

	/* Continue the send procedure with accumulated SIO or NETIF packets.
	 * Reset the debounce flags.
	 */
	ul_pending |= ipc_imem_ul_write_td(ipc_imem);

	/* if UL data is pending restart TD update timer */
	if (ul_pending) {
		ipc_imem->hrtimer_period =
			ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}

	/* If CP has executed the transition from IPC_INIT to IPC_RUNNING in
	 * the PSI phase, wake up the flash app to open the pipes.
	 */
	if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
	    ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
	    ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
	    IPC_MEM_DEVICE_IPC_RUNNING) {
		complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
	}

	/* Reset the expected CP state. */
	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
			ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

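/* Callback by tasklet for handling interrupt events. */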
static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
			      size_t size)
{
	ipc_imem_handle_irq(ipc_imem, arg);

	return 0;
}

void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
{
	/* start doorbell irq delay timer if UL is pending */
	if (ipc_imem_ul_write_td(ipc_imem))
		ipc_imem_td_update_timer_start(ipc_imem);
}

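/* Check the execution stage and update the AP phase accordingly. */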
static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
						  enum ipc_mem_exec_stage stage)
{
	switch (stage) {
	case IPC_MEM_EXEC_STAGE_BOOT:
		if (ipc_imem->phase != IPC_P_ROM) {
			/* Send this event only once */
			ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
		}

		ipc_imem->phase = IPC_P_ROM;
		break;

	case IPC_MEM_EXEC_STAGE_PSI:
		ipc_imem->phase = IPC_P_PSI;
		break;

	case IPC_MEM_EXEC_STAGE_EBL:
		ipc_imem->phase = IPC_P_EBL;
		break;

	case IPC_MEM_EXEC_STAGE_RUN:
		if (ipc_imem->phase != IPC_P_RUN &&
		    ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
			ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
		}
		ipc_imem->phase = IPC_P_RUN;
		break;

	case IPC_MEM_EXEC_STAGE_CRASH:
		if (ipc_imem->phase != IPC_P_CRASH)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);

		ipc_imem->phase = IPC_P_CRASH;
		break;

	case IPC_MEM_EXEC_STAGE_CD_READY:
		if (ipc_imem->phase != IPC_P_CD_READY)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
		ipc_imem->phase = IPC_P_CD_READY;
		break;

	default:
		/* unknown exec stage:
		 * assume that the link is down and send info to listeners
		 */
		ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
		break;
	}

	return ipc_imem->phase;
}

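/* Send a pipe-open message to CP and mark the pipe open on success. */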
static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
			       struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = {
		.pipe_open.pipe = pipe,
	};

	if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				  IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
		pipe->is_open = true;

	return pipe->is_open;
}

/* Allocates the TDs for the given DL pipe in the tasklet context. */
static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
				     void *msg, size_t size)
{
	struct ipc_pipe *dl_pipe = msg;
	bool processed = false;
	int i;

	for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
		processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);

	/* Trigger the doorbell irq to inform CP that new downlink buffers are
	 * available.
	 */
	if (processed)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);

	return 0;
}

static enum hrtimer_restart
ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, tdupdate_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

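/* Get the CP execution state and map it to the AP phase. */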
enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage exec_stage =
		ipc_imem_get_exec_stage_buffered(ipc_imem);
	/* If the deinit sequence is running, keep the internal phase. */
	return ipc_imem->phase == IPC_P_OFF_REQ ?
		       ipc_imem->phase :
		       ipc_imem_phase_update_check(ipc_imem, exec_stage);
}

const char *ipc_imem_phase_get_string(enum ipc_phase phase)
{
	switch (phase) {
	case IPC_P_RUN:
		return "A-RUN";

	case IPC_P_OFF:
		return "A-OFF";

	case IPC_P_ROM:
		return "A-ROM";

	case IPC_P_PSI:
		return "A-PSI";

	case IPC_P_EBL:
		return "A-EBL";

	case IPC_P_CRASH:
		return "A-CRASH";

	case IPC_P_CD_READY:
		return "A-CD_READY";

	case IPC_P_OFF_REQ:
		return "A-OFF_REQ";

	default:
		return "A-???";
	}
}

void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };

	pipe->is_open = false;
	ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
			      &prep_args);

	ipc_imem_pipe_cleanup(ipc_imem, pipe);
}

void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
		return;
	}

	channel = &ipc_imem->channels[channel_id];

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel_id, channel->state);
		return;
	}

	/* Free only the channel id in the CP power off mode. */
	if (channel->state == IMEM_CHANNEL_RESERVED)
		/* Release only the channel id. */
		goto channel_free;

	if (ipc_imem->phase == IPC_P_RUN) {
		ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
		ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
	}

	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);

channel_free:
	ipc_imem_channel_free(channel);
}

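/* Open a channel: activate it, open both pipes and allocate the downlink
 * buffers in the tasklet context.
 */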
struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
					      int channel_id, u32 db_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
		return NULL;
	}

	channel = &ipc_imem->channels[channel_id];

	channel->state = IMEM_CHANNEL_ACTIVE;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
		goto ul_pipe_err;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
		goto dl_pipe_err;

	/* Allocate the downlink buffers in tasklet context. */
	if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
				     &channel->dl_pipe, 0, false)) {
		dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
		goto task_failed;
	}

	/* Active channel. */
	return channel;
task_failed:
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
dl_pipe_err:
	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
ul_pipe_err:
	ipc_imem_channel_free(channel);
	return NULL;
}

void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
{
	ipc_protocol_suspend(ipc_imem->ipc_protocol);
}

void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
{
	ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
}

void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage stage;

	if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
		stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		ipc_imem_phase_update_check(ipc_imem, stage);
	}
}

void ipc_imem_channel_free(struct ipc_mem_channel *channel)
{
	/* Reset dynamic channel elements. */
	channel->state = IMEM_CHANNEL_FREE;
}

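/* Reserve a channel of the given type for the given index and return its
 * position in the channel table.
 */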
int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
			   enum ipc_ctype ctype)
{
	struct ipc_mem_channel *channel;
	int i;

	/* Find channel of given type/index */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];
		if (channel->ctype == ctype && channel->index == index)
			break;
	}

	if (i >= ipc_imem->nr_of_channels) {
		dev_dbg(ipc_imem->dev,
			"no channel definition for index=%d ctype=%d", index,
			ctype);
		return -ECHRNG;
	}

	if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
		dev_dbg(ipc_imem->dev, "channel is in use");
		return -EBUSY;
	}

	if (channel->ctype == IPC_CTYPE_WWAN &&
	    index == IPC_MEM_MUX_IP_CH_IF_ID)
		channel->if_id = index;

	channel->channel_id = index;
	channel->state = IMEM_CHANNEL_RESERVED;

	return i;
}

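/* Initialize a channel from the given configuration and append it to the
 * channel table.
 */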
void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
			   struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
	    chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
		dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
			chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
		return;
	}

	if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "too many channels");
		return;
	}

	channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
	channel->channel_id = ipc_imem->nr_of_channels;
	channel->ctype = ctype;
	channel->index = chnl_cfg.id;
	channel->net_err_count = 0;
	channel->state = IMEM_CHANNEL_FREE;
	ipc_imem->nr_of_channels++;

	ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
				IRQ_MOD_OFF);

	skb_queue_head_init(&channel->ul_list);

	init_completion(&channel->ul_sem);
}

void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
			     struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (id < 0 || id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", id);
		return;
	}

	channel = &ipc_imem->channels[id];

	if (channel->state != IMEM_CHANNEL_FREE &&
	    channel->state != IMEM_CHANNEL_RESERVED) {
		dev_err(ipc_imem->dev, "invalid channel state %d",
			channel->state);
		return;
	}

	channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
	channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
	channel->ul_pipe.is_open = false;
	channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
	channel->ul_pipe.channel = channel;
	channel->ul_pipe.dir = IPC_MEM_DIR_UL;
	channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->ul_pipe.irq_moderation = irq_moderation;
	channel->ul_pipe.buf_size = 0;

	channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
	channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
	channel->dl_pipe.is_open = false;
	channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
	channel->dl_pipe.channel = channel;
	channel->dl_pipe.dir = IPC_MEM_DIR_DL;
	channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->dl_pipe.irq_moderation = irq_moderation;
	channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
}

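/* Clean up all pipes and free every configured channel. */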
static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
{
	int i;

	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		struct ipc_mem_channel *channel;

		channel = &ipc_imem->channels[i];

		ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
		ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);

		ipc_imem_channel_free(channel);
	}
}

void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	struct sk_buff *skb;

	/* Force the pipe to the closed state also when it was not explicitly
	 * closed through ipc_imem_pipe_close().
	 */
	pipe->is_open = false;

	/* Empty the uplink skb accumulator. */
	while ((skb = skb_dequeue(&pipe->channel->ul_list)))
		ipc_pcie_kfree_skb(ipc_imem->pcie, skb);

	ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
}

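/* Send IPC protocol uninit to the modem while the PCIe link is active. */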
static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
	enum ipc_mem_device_ipc_state ipc_state;

	/* When the PCIe link is up, set IPC_UNINIT on the modem; otherwise
	 * ignore it when a PCIe link-down happened.
	 */
	if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
		/* set modem to UNINIT
		 * (in case we want to reload the AP driver without resetting
		 * the modem)
		 */
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_UNINIT);
		ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);

		/* Wait for IPC_MODEM_UNINIT_TIMEOUT_MS at most to allow the
		 * modem to uninitialize the protocol.
		 */
		while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
		       (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
		       (timeout > 0)) {
			usleep_range(1000, 1250);
			timeout--;
			ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
		}
	}
}

void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
{
	ipc_imem->phase = IPC_P_OFF_REQ;

	/* forward MDM_NOT_READY to listeners */
	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);

	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);

	/* cancel the workqueue */
	cancel_work_sync(&ipc_imem->run_state_worker);

	if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
		ipc_mux_deinit(ipc_imem->mux);
		ipc_debugfs_deinit(ipc_imem);
		ipc_wwan_deinit(ipc_imem->wwan);
		ipc_port_deinit(ipc_imem->ipc_port);
	}

	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	ipc_imem_device_ipc_uninit(ipc_imem);
	ipc_imem_channel_reset(ipc_imem);

	ipc_protocol_deinit(ipc_imem->ipc_protocol);
	ipc_task_deinit(ipc_imem->ipc_task);

	kfree(ipc_imem->ipc_task);
	kfree(ipc_imem->mmio);

	ipc_imem->phase = IPC_P_OFF;
}

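/* Configure the shared memory interface depending on the current execution
 * phase: poll the execution stage in the ROM phase, otherwise run the IPC
 * init handshake.
 */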
static int ipc_imem_config(struct iosm_imem *ipc_imem)
{
	enum ipc_phase phase;

	/* Initialize the semaphore for the blocking read UL/DL transfer. */
	init_completion(&ipc_imem->ul_pend_sem);

	init_completion(&ipc_imem->dl_pend_sem);

	/* clear internal flags */
	ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
	ipc_imem->enter_runtime = 0;

	phase = ipc_imem_phase_update(ipc_imem);

	/* Either CP shall be in the power off or power on phase. */
	switch (phase) {
	case IPC_P_ROM:
		ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
		/* poll execution stage (for delayed start, e.g. NAND) */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
		return 0;

	case IPC_P_PSI:
	case IPC_P_EBL:
	case IPC_P_RUN:
		/* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;

		/* Verify the expected initial state. */
		if (ipc_imem->ipc_requested_state ==
		    ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
			ipc_imem_ipc_init_check(ipc_imem);

			return 0;
		}
		dev_err(ipc_imem->dev,
			"ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
			ipc_mmio_get_ipc_state(ipc_imem->mmio));
		break;
	case IPC_P_CRASH:
	case IPC_P_CD_READY:
		dev_dbg(ipc_imem->dev,
			"Modem is in phase %d, reset Modem to collect CD",
			phase);
		return 0;
	default:
		dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
		break;
	}

	complete(&ipc_imem->dl_pend_sem);
	complete(&ipc_imem->ul_pend_sem);
	ipc_imem->phase = IPC_P_OFF;
	return -EIO;
}

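/**
 * ipc_imem_init - Initialize the shared memory layer and the driver internals.
 * @pcie:	Pointer to core driver data-struct
 * @device_id:	PCI device ID
 * @mmio:	Pointer to the mapped AP <-> CP shared memory (MMIO) region
 * @dev:	Pointer to device structure
 *
 * Returns: Initialized imem pointer on success else NULL
 */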
struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
				void __iomem *mmio, struct device *dev)
{
	struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
	enum ipc_mem_exec_stage stage;

	if (!ipc_imem)
		return NULL;

	/* Save the device address. */
	ipc_imem->pcie = pcie;
	ipc_imem->dev = dev;

	ipc_imem->pci_device_id = device_id;

	ipc_imem->cp_version = 0;
	ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;

	/* Reset the max number of configured channels */
	ipc_imem->nr_of_channels = 0;

	/* allocate IPC MMIO */
	ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
	if (!ipc_imem->mmio) {
		dev_err(ipc_imem->dev, "failed to initialize mmio region");
		goto mmio_init_fail;
	}

	ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
				     GFP_KERNEL);

	/* Create tasklet for event handling */
	if (!ipc_imem->ipc_task)
		goto ipc_task_fail;

	if (ipc_task_init(ipc_imem->ipc_task))
		goto ipc_task_init_fail;

	ipc_imem->ipc_task->dev = ipc_imem->dev;

	INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);

	ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);

	if (!ipc_imem->ipc_protocol)
		goto protocol_init_fail;

	/* The phase is set to power off. */
	ipc_imem->phase = IPC_P_OFF;

	hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;

	hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;

	hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;

	hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;

	hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;

	if (ipc_imem_config(ipc_imem)) {
		dev_err(ipc_imem->dev, "failed to initialize the imem");
		goto imem_config_fail;
	}

	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
		/* Alloc and Register devlink */
		ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
		if (!ipc_imem->ipc_devlink) {
			dev_err(ipc_imem->dev, "Devlink register failed");
			goto imem_config_fail;
		}

		if (ipc_flash_link_establish(ipc_imem))
			goto devlink_channel_fail;

		set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
	}
	return ipc_imem;
devlink_channel_fail:
	ipc_devlink_deinit(ipc_imem->ipc_devlink);
imem_config_fail:
	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);
protocol_init_fail:
	cancel_work_sync(&ipc_imem->run_state_worker);
	ipc_task_deinit(ipc_imem->ipc_task);
ipc_task_init_fail:
	kfree(ipc_imem->ipc_task);
ipc_task_fail:
	kfree(ipc_imem->mmio);
mmio_init_fail:
	kfree(ipc_imem);
	return NULL;
}

void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
{
	/* Debounce IPC_EV_IRQ. */
	if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
		ipc_imem->ev_irq_pending[irq] = true;
		ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
					 NULL, 0, false);
	}
}

void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
{
	ipc_imem->td_update_timer_suspended = suspend;
}

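/* Verify the CP execution state and forward the chip information to the
 * devlink layer (runs in the tasklet context).
 */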
static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
						 int arg, void *msg,
						 size_t msgsize)
{
	enum ipc_mem_exec_stage stage;
	struct sk_buff *skb;
	int rc = -EINVAL;
	size_t size;

	/* Test the CP execution state. */
	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
		dev_err(ipc_imem->dev,
			"Execution_stage: expected BOOT, received = %X", stage);
		goto trigger_chip_info_fail;
	}
	/* Allocate a new sk buf for the chip info. */
	size = ipc_imem->mmio->chip_info_size;
	if (size > IOSM_CHIP_INFO_SIZE_MAX)
		goto trigger_chip_info_fail;

	skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
	if (!skb) {
		dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
		rc = -ENOMEM;
		goto trigger_chip_info_fail;
	}
	/* Copy the chip info characters into the ipc_skb. */
	ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
	/* First change to the ROM boot phase. */
	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
	ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
	ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
	rc = 0;
trigger_chip_info_fail:
	return rc;
}

int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
{
	return ipc_task_queue_send_task(ipc_imem,
					ipc_imem_devlink_trigger_chip_info_cb,
					0, NULL, 0, true);
}