0001
0002
0003
0004
0005
0006 #include <linux/delay.h>
0007
0008 #include "iosm_ipc_chnl_cfg.h"
0009 #include "iosm_ipc_devlink.h"
0010 #include "iosm_ipc_imem.h"
0011 #include "iosm_ipc_imem_ops.h"
0012 #include "iosm_ipc_port.h"
0013 #include "iosm_ipc_task_queue.h"
0014
0015
0016 int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
0017 {
0018 dev_dbg(ipc_imem->dev, "%s if id: %d",
0019 ipc_imem_phase_get_string(ipc_imem->phase), if_id);
0020
0021
0022 if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
0023 dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
0024 ipc_imem_phase_get_string(ipc_imem->phase));
0025 return -EIO;
0026 }
0027
0028 return ipc_mux_open_session(ipc_imem->mux, if_id);
0029 }
0030
0031
0032 void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
0033 int channel_id)
0034 {
0035 if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
0036 if_id <= IP_MUX_SESSION_END)
0037 ipc_mux_close_session(ipc_imem->mux, if_id);
0038 }
0039
0040
/* Task-queue callback: push pending uplink data to CP from the IPC
 * task context.  The generic task parameters arg/msg/size are unused.
 * Always returns 0.
 */
static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
				  void *msg, size_t size)
{
	ipc_imem_ul_send(ipc_imem);

	return 0;
}
0048
0049
/* Schedule ipc_imem_tq_cdev_write() on the IPC task queue so the
 * uplink transfer runs in the task context (no argument payload, no
 * wait for completion).  Returns the task-queue submission status.
 */
static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
{
	return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
					NULL, 0, false);
}
0055
0056
0057 int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
0058 int if_id, int channel_id, struct sk_buff *skb)
0059 {
0060 int ret = -EINVAL;
0061
0062 if (!ipc_imem || channel_id < 0)
0063 goto out;
0064
0065
0066 if (ipc_imem->phase != IPC_P_RUN) {
0067 dev_dbg(ipc_imem->dev, "phase %s transmit",
0068 ipc_imem_phase_get_string(ipc_imem->phase));
0069 ret = -EIO;
0070 goto out;
0071 }
0072
0073
0074 ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
0075 out:
0076 return ret;
0077 }
0078
0079
/* Initialize the WWAN data path: read and validate the CP version,
 * configure the WWAN channel and register the wwan net interfaces.
 * mux_type is part of the signature but not referenced in this body;
 * the channel configuration comes from ipc_chnl_cfg_get().
 */
void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
				enum ipc_mux_protocol mux_type)
{
	struct ipc_chnl_cfg chnl_cfg = { 0 };

	ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);

	/* Bail out if the CP version could not be read from MMIO. */
	if (ipc_imem->cp_version == -1) {
		dev_err(ipc_imem->dev, "invalid CP version");
		return;
	}

	/* Fetch the config for the next free channel slot and set it up. */
	ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
	ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
			      IRQ_MOD_OFF);

	/* Register the WWAN network interfaces; failure is only logged. */
	ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
	if (!ipc_imem->wwan)
		dev_err(ipc_imem->dev,
			"failed to register the ipc_wwan interfaces");
}
0103
0104
0105 static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
0106 struct sk_buff *skb)
0107 {
0108 struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
0109 char *buf = skb->data;
0110 int len = skb->len;
0111 dma_addr_t mapping;
0112 int ret;
0113
0114 ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);
0115
0116 if (ret)
0117 goto err;
0118
0119 BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
0120
0121 IPC_CB(skb)->mapping = mapping;
0122 IPC_CB(skb)->direction = DMA_TO_DEVICE;
0123 IPC_CB(skb)->len = len;
0124 IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
0125
0126 err:
0127 return ret;
0128 }
0129
0130
0131 static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
0132 struct ipc_mem_channel *channel)
0133 {
0134 enum ipc_phase phase;
0135
0136
0137 phase = ipc_imem->phase;
0138
0139
0140 switch (phase) {
0141 case IPC_P_RUN:
0142 case IPC_P_PSI:
0143 case IPC_P_EBL:
0144 break;
0145
0146 case IPC_P_ROM:
0147
0148
0149
0150 if (channel->state != IMEM_CHANNEL_RESERVED) {
0151 dev_err(ipc_imem->dev,
0152 "ch[%d]:invalid channel state %d,expected %d",
0153 channel->channel_id, channel->state,
0154 IMEM_CHANNEL_RESERVED);
0155 goto channel_unavailable;
0156 }
0157 goto channel_available;
0158
0159 default:
0160
0161 dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
0162 channel->channel_id, phase);
0163 goto channel_unavailable;
0164 }
0165
0166 if (channel->state != IMEM_CHANNEL_ACTIVE) {
0167 dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
0168 channel->channel_id, channel->state);
0169 goto channel_unavailable;
0170 }
0171
0172 channel_available:
0173 return true;
0174
0175 channel_unavailable:
0176 return false;
0177 }
0178
0179
0180
0181
0182
0183
/* Release a port/control channel: wait (bounded) for pending uplink
 * and downlink data to drain, then close both pipes and free the
 * channel.  No-op if the IPC instance is off or the channel is free.
 */
void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem,
			     struct ipc_mem_channel *channel)
{
	enum ipc_phase curr_phase;
	int status = 0;
	u32 tail = 0;

	curr_phase = ipc_imem->phase;

	/* Nothing to do if the IPC instance is already switched off. */
	if (curr_phase == IPC_P_OFF) {
		dev_err(ipc_imem->dev,
			"nothing to do. Current Phase: %s",
			ipc_imem_phase_get_string(curr_phase));
		return;
	}

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel->channel_id, channel->state);
		return;
	}

	/* If UL data is still queued (UL head != tail), request a
	 * notification and wait up to IPC_PEND_DATA_TIMEOUT for CP to
	 * consume it; a timeout is only logged and the close continues.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		ipc_imem->app_notify_ul_pend = 1;

		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->ul_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}

		ipc_imem->app_notify_ul_pend = 0;
	}

	/* Same bounded wait for DL data CP has produced but the host has
	 * not yet consumed (current DL tail differs from the cached one).
	 */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

	if (tail != channel->dl_pipe.old_tail) {
		ipc_imem->app_notify_dl_pend = 1;

		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->dl_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}

		ipc_imem->app_notify_dl_pend = 0;
	}

	/* Mark the channel as closing so no new transfers start, then
	 * tear down both pipes and return the channel to the free pool.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;

	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);

	ipc_imem_channel_free(channel);
}
0271
0272
0273 struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
0274 int chl_id, int hp_id)
0275 {
0276 struct ipc_mem_channel *channel;
0277 int ch_id;
0278
0279
0280 if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
0281 dev_err(ipc_imem->dev, "PORT open refused, phase %s",
0282 ipc_imem_phase_get_string(ipc_imem->phase));
0283 return NULL;
0284 }
0285
0286 ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);
0287
0288 if (ch_id < 0) {
0289 dev_err(ipc_imem->dev, "reservation of an PORT chnl id failed");
0290 return NULL;
0291 }
0292
0293 channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);
0294
0295 if (!channel) {
0296 dev_err(ipc_imem->dev, "PORT channel id open failed");
0297 return NULL;
0298 }
0299
0300 return channel;
0301 }
0302
0303
/* Queue an uplink skb on a control (cdev) channel and trigger the
 * transfer from the task context.  Returns 0 on success, -EIO when the
 * channel is not usable in the current phase, otherwise the DMA-map or
 * task-queue error.
 */
int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
{
	struct ipc_mem_channel *channel = ipc_cdev->channel;
	struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
	int ret = -EIO;

	/* Refuse the write if the channel/phase cannot carry data or a
	 * shutdown has been requested.
	 */
	if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
	    ipc_imem->phase == IPC_P_OFF_REQ)
		goto out;

	ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);

	if (ret)
		goto out;

	/* Add the skb to the uplink accumulator for the task to send. */
	skb_queue_tail(&channel->ul_list, skb);

	ret = ipc_imem_call_cdev_write(ipc_imem);

	if (ret) {
		/* NOTE(review): the skb is still DMA-mapped on this path;
		 * presumably the caller frees it and the unmap happens
		 * there - verify against the caller's error handling.
		 */
		skb_dequeue_tail(&channel->ul_list);
		dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
			ipc_cdev->channel->channel_id);
	}
out:
	return ret;
}
0332
0333
/* Open the devlink (flash) channel.  Behavior depends on the phase:
 * - IPC_P_OFF / IPC_P_ROM: allocate the flash control channel and
 *   enqueue the chip-info request to CP;
 * - IPC_P_PSI / IPC_P_EBL: validate the CP version and re-open the
 *   previously allocated channel.
 * Returns the channel or NULL on refusal/failure.
 */
struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	enum ipc_phase phase;
	int channel_id;

	phase = ipc_imem_phase_update(ipc_imem);
	switch (phase) {
	case IPC_P_OFF:
	case IPC_P_ROM:
		/* Reserve control channel id 7 as the flash channel. */
		channel_id = ipc_imem_channel_alloc(ipc_imem,
						    IPC_MEM_CTRL_CHL_ID_7,
						    IPC_CTYPE_CTRL);

		if (channel_id < 0) {
			dev_err(ipc_imem->dev,
				"reservation of a flash channel id failed");
			goto error;
		}

		ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id;
		channel = &ipc_imem->channels[channel_id];

		/* Ask CP for the chip info; on failure release the
		 * reservation again.
		 */
		if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) {
			dev_err(ipc_imem->dev, "Enqueue of chip info failed");
			channel->state = IMEM_CHANNEL_FREE;
			goto error;
		}

		return channel;

	case IPC_P_PSI:
	case IPC_P_EBL:
		/* The channel id was allocated earlier; just validate the
		 * CP version and re-open the channel.
		 */
		ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
		if (ipc_imem->cp_version == -1) {
			dev_err(ipc_imem->dev, "invalid CP version");
			goto error;
		}

		channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id;
		return ipc_imem_channel_open(ipc_imem, channel_id,
					     IPC_HP_CDEV_OPEN);

	default:
		/* Any other phase refuses the open. */
		dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase);
	}
error:
	return NULL;
}
0386
0387
/* Close the devlink channel: wait (bounded) for CP to reach the RUN or
 * PSI execution stage, drain pending UL/DL data, then clean up both
 * pipes and shrink the channel count.
 */
void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
{
	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
	int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT;
	enum ipc_mem_exec_stage exec_stage;
	struct ipc_mem_channel *channel;
	int status = 0;
	u32 tail = 0;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;

	/* Poll every 20 ms, up to boot_check_timeout in total, until CP
	 * reports the RUN or PSI execution stage.
	 */
	do {
		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		if (exec_stage == IPC_MEM_EXEC_STAGE_RUN ||
		    exec_stage == IPC_MEM_EXEC_STAGE_PSI)
			break;
		msleep(20);
		boot_check_timeout -= 20;
	} while (boot_check_timeout > 0);

	/* If UL data is pending (UL head != tail), wait up to
	 * IPC_PEND_DATA_TIMEOUT for CP to consume it; a timeout is only
	 * logged and the close continues.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		status = wait_for_completion_interruptible_timeout
			(&ipc_imem->ul_pend_sem,
			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Data Timeout on UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}
	}

	/* Same bounded wait for unread DL data (current DL tail differs
	 * from the cached one).
	 */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

	if (tail != channel->dl_pipe.old_tail) {
		status = wait_for_completion_interruptible_timeout
			(&ipc_imem->dl_pend_sem,
			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Data Timeout on DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}
	}

	/* Mark the channel as closing, release both pipes' resources and
	 * drop the channel from the channel count.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;

	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
	ipc_imem->nr_of_channels--;
}
0452
/* Queue a downlink skb on the devlink RX list and wake any reader
 * blocked in ipc_imem_sys_devlink_read().
 */
void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
				    struct sk_buff *skb)
{
	skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb);
	complete(&ipc_devlink->devlink_sio.read_sem);
}
0459
0460
/* Transfer the PSI (primary boot-strap image) of size count to CP while
 * it is in the ROM phase and wait until CP executes it and reaches the
 * IPC RUNNING state.  Returns 0 on success, a negative errno / negated
 * ROM exit code on failure, or a positive value when the image was
 * transferred but CP failed to proceed (the caller treats any non-zero
 * value as failure).
 */
static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem,
				     struct ipc_mem_channel *channel,
				     unsigned char *buf, int count)
{
	int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT;
	enum ipc_mem_exec_stage exec_stage;

	dma_addr_t mapping = 0;
	int ret;

	ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping,
				DMA_TO_DEVICE);
	if (ret)
		goto pcie_addr_map_fail;

	/* Publish the PSI buffer address/size via MMIO and ring the boot
	 * doorbell so the ROM loader starts fetching the image.
	 */
	ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count);
	ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT);

	/* Wait for CP to acknowledge the PSI transfer. */
	ret = wait_for_completion_interruptible_timeout
		(&channel->ul_sem,
		 msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));

	if (ret <= 0) {
		dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d",
			ret);
		goto psi_transfer_fail;
	}

	/* Any ROM exit code other than OPEN_EXT/CERT_EXT means the image
	 * was rejected; report it as a negated error code.
	 */
	if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT &&
	    ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) {
		ret = (-1) * ((int)ipc_imem->rom_exit_code);
		goto psi_transfer_fail;
	}

	dev_dbg(ipc_imem->dev, "PSI image successfully downloaded");

	/* Poll every 20 ms, up to psi_start_timeout in total, until CP
	 * reports the PSI execution stage.
	 */
	do {
		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);

		if (exec_stage == IPC_MEM_EXEC_STAGE_PSI)
			break;

		msleep(20);
		psi_start_timeout -= 20;
	} while (psi_start_timeout > 0);

	/* ret is still positive here, so the caller sees a failure. */
	if (exec_stage != IPC_MEM_EXEC_STAGE_PSI)
		goto psi_transfer_fail;

	ipc_imem->phase = IPC_P_PSI;

	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage);

	/* Kick off the IPC init state machine and wait for CP to confirm
	 * the RUNNING state on the channel.
	 */
	ipc_imem_ipc_init_check(ipc_imem);

	ret = wait_for_completion_interruptible_timeout
		(&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
	if (ret <= 0) {
		dev_err(ipc_imem->dev,
			"Failed PSI RUNNING state on CP, Error-%d", ret);
		goto psi_transfer_fail;
	}

	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) !=
			IPC_MEM_DEVICE_IPC_RUNNING) {
		dev_err(ipc_imem->dev,
			"ch[%d] %s: unexpected CP IPC state %d, not RUNNING",
			channel->channel_id,
			ipc_imem_phase_get_string(ipc_imem->phase),
			ipc_mmio_get_ipc_state(ipc_imem->mmio));

		goto psi_transfer_fail;
	}

	/* Open the flash channel for the subsequent image transfers. */
	if (!ipc_imem_sys_devlink_open(ipc_imem)) {
		dev_err(ipc_imem->dev, "can't open flash_channel");
		goto psi_transfer_fail;
	}

	ret = 0;
psi_transfer_fail:
	/* The PSI buffer mapping is released on every path after a
	 * successful map, success or failure.
	 */
	ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE);
pcie_addr_map_fail:
	return ret;
}
0558
/* Write a buffer of count bytes to the devlink channel.  In the ROM
 * phase the buffer is the PSI image and is handed to
 * ipc_imem_sys_psi_transfer(); otherwise it is copied into a
 * DMA-mapped skb, queued on the UL list and pushed from the task
 * context, blocking until CP confirms.  Returns 0 on success or a
 * negative errno.
 */
int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
			       unsigned char *buf, int count)
{
	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
	struct ipc_mem_channel *channel;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;

	/* ROM phase: download and activate the PSI image. */
	if (ipc_imem->phase == IPC_P_ROM) {
		ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count);

		/* A positive return from the PSI transfer means CP
		 * accepted the image but failed to proceed; request the
		 * in-band crash signature from CP in that case.
		 */
		if (ret > 0)
			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      false);
		goto out;
	}

	/* Allocate an skb with a DMA-mapped data area for the payload. */
	skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping,
				 DMA_TO_DEVICE, 0);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(skb_put(skb, count), buf, count);

	/* Mark the skb so the sender blocks until CP confirms it. */
	IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;

	/* Add the skb to the uplink accumulator. */
	skb_queue_tail(&channel->ul_list, skb);

	/* Trigger the UL transfer and wait for the CP confirmation via
	 * the channel's ul_sem (interruptible, no timeout).
	 */
	if (!ipc_imem_call_cdev_write(ipc_imem)) {
		ret = wait_for_completion_interruptible(&channel->ul_sem);

		if (ret < 0) {
			dev_err(ipc_imem->dev,
				"ch[%d] no CP confirmation, status = %d",
				channel->channel_id, ret);
			ipc_pcie_kfree_skb(ipc_devlink->pcie, skb);
			goto out;
		}
	}
	ret = 0;
out:
	return ret;
}
0616
/* Blocking read from the devlink RX queue.  Waits (IPC_READ_TIMEOUT per
 * attempt) for an skb, copies its payload into data and reports the
 * length in *bytes_read.  Returns 0 on success, -ETIMEDOUT when no data
 * arrives, or -EINVAL when the caller's buffer is smaller than the skb
 * (the skb is freed, i.e. that data is dropped).
 */
int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
			      u32 bytes_to_read, u32 *bytes_read)
{
	struct sk_buff *skb = NULL;
	int rc = 0;

	/* Flag a pending read, then dequeue or wait for an RX skb. */
	devlink->devlink_sio.devlink_read_pend = 1;
	while (!skb && !(skb = skb_dequeue(&devlink->devlink_sio.rx_list))) {
		if (!wait_for_completion_interruptible_timeout
		    (&devlink->devlink_sio.read_sem,
		     msecs_to_jiffies(IPC_READ_TIMEOUT))) {
			/* NOTE(review): devlink_read_pend is left set to 1
			 * on this timeout path - confirm that is intended.
			 */
			dev_err(devlink->dev, "Read timedout");
			rc = -ETIMEDOUT;
			goto devlink_read_fail;
		}
	}
	devlink->devlink_sio.devlink_read_pend = 0;
	if (bytes_to_read < skb->len) {
		dev_err(devlink->dev, "Invalid size,expected len %d", skb->len);
		rc = -EINVAL;
		goto devlink_read_fail;
	}
	*bytes_read = skb->len;
	memcpy(data, skb->data, skb->len);

	/* skb is NULL on the timeout path; dev_kfree_skb(NULL) is a no-op. */
devlink_read_fail:
	dev_kfree_skb(skb);
	return rc;
}