// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/nospec.h>

#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_mux_codec.h"
#include "iosm_ipc_task_queue.h"

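/* Queue the command skb on the UL channel list and trigger the uplink
 * transfer.
 */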
static int ipc_mux_tq_cmd_send(struct iosm_imem *ipc_imem, int arg, void *msg,
			       size_t size)
{
	struct iosm_mux *ipc_mux = ipc_imem->mux;
	const struct mux_acb *acb = msg;

	skb_queue_tail(&ipc_mux->channel->ul_list, acb->skb);
	ipc_imem_ul_send(ipc_mux->imem);

	return 0;
}

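/* Pass the ACB to the modem via the task queue and optionally wait for the
 * command response.
 */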
static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
{
	struct completion *completion = &ipc_mux->channel->ul_sem;
	int ret = ipc_task_queue_send_task(ipc_mux->imem, ipc_mux_tq_cmd_send,
					   0, &ipc_mux->acb,
					   sizeof(ipc_mux->acb), false);
	if (ret) {
		dev_err(ipc_mux->dev, "unable to send mux command");
		return ret;
	}

	/* if blocking, wait for the command completion signalled by the
	 * modem; expiry of the timeout is reported as a modem failure.
	 */
	if (blocking) {
		u32 wait_time_milliseconds = IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT;

		reinit_completion(completion);

		if (wait_for_completion_interruptible_timeout
		    (completion, msecs_to_jiffies(wait_time_milliseconds)) ==
		    0) {
			dev_err(ipc_mux->dev, "ch[%d] timeout",
				ipc_mux->channel_id);
			ipc_uevent_send(ipc_mux->imem->dev, UEVENT_MDM_TIMEOUT);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

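/* Initialize the ACB header. */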
static void ipc_mux_acb_init(struct iosm_mux *ipc_mux)
{
	struct mux_acb *acb = &ipc_mux->acb;
	struct mux_acbh *header;

	header = (struct mux_acbh *)(acb->skb)->data;
	header->block_length = cpu_to_le32(sizeof(struct mux_acbh));
	header->first_cmd_index = header->block_length;
	header->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ACBH);
	header->sequence_nr = cpu_to_le16(ipc_mux->acb_tx_sequence_nr++);
}

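/* Add a command to the ACB. */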
static struct mux_cmdh *ipc_mux_acb_add_cmd(struct iosm_mux *ipc_mux, u32 cmd,
					    void *param, u32 param_size)
{
	struct mux_acbh *header;
	struct mux_cmdh *cmdh;
	struct mux_acb *acb;

	acb = &ipc_mux->acb;
	header = (struct mux_acbh *)(acb->skb)->data;
	cmdh = (struct mux_cmdh *)
		((acb->skb)->data + le32_to_cpu(header->block_length));

	cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
	cmdh->command_type = cpu_to_le32(cmd);
	cmdh->if_id = acb->if_id;

	acb->cmd = cmd;
	cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_cmdh, param) +
				    param_size);
	cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
	if (param)
		memcpy(&cmdh->param, param, param_size);

	skb_put(acb->skb, le32_to_cpu(header->block_length) +
			  le16_to_cpu(cmdh->cmd_len));

	return cmdh;
}

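/* Prepare a MUX Lite command header and copy in the parameters. */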
static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
						  u32 cmd, struct mux_acb *acb,
						  void *param, u32 param_size)
{
	struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)acb->skb->data;

	cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
	cmdh->command_type = cpu_to_le32(cmd);
	cmdh->if_id = acb->if_id;

	acb->cmd = cmd;

	cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_lite_cmdh, param) +
				    param_size);
	cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);

	if (param)
		memcpy(&cmdh->param, param, param_size);

	skb_put(acb->skb, le16_to_cpu(cmdh->cmd_len));

	return cmdh;
}

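/* Allocate and map an skb for the ACB uplink buffer. */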
static int ipc_mux_acb_alloc(struct iosm_mux *ipc_mux)
{
	struct mux_acb *acb = &ipc_mux->acb;
	struct sk_buff *skb;
	dma_addr_t mapping;

	/* Allocate skb memory for the uplink buffer. */
	skb = ipc_pcie_alloc_skb(ipc_mux->pcie, MUX_MAX_UL_ACB_BUF_SIZE,
				 GFP_ATOMIC, &mapping, DMA_TO_DEVICE, 0);
	if (!skb)
		return -ENOMEM;

	/* Save the skb address. */
	acb->skb = skb;

	memset(skb->data, 0, MUX_MAX_UL_ACB_BUF_SIZE);

	return 0;
}

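/* Build a command block for the active protocol, optionally patch in the
 * transaction id of the command being answered, and send it to the modem.
 */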
int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
			     u32 transaction_id, union mux_cmd_param *param,
			     size_t res_size, bool blocking, bool respond)
{
	struct mux_acb *acb = &ipc_mux->acb;
	union mux_type_cmdh cmdh;
	int ret = 0;

	acb->if_id = if_id;
	ret = ipc_mux_acb_alloc(ipc_mux);
	if (ret)
		return ret;

	if (ipc_mux->protocol == MUX_LITE) {
		cmdh.ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb,
						     param, res_size);

		if (respond)
			cmdh.ack_lite->transaction_id =
				cpu_to_le32(transaction_id);
	} else {
		/* Initialize the ACB header. */
		ipc_mux_acb_init(ipc_mux);
		cmdh.ack_aggr = ipc_mux_acb_add_cmd(ipc_mux, cmd_type, param,
						    res_size);

		if (respond)
			cmdh.ack_aggr->transaction_id =
				cpu_to_le32(transaction_id);
	}
	ret = ipc_mux_acb_send(ipc_mux, blocking);

	return ret;
}

void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
{
	/* Inform the network interface to start/stop flow ctrl */
	ipc_wwan_tx_flowctrl(session->wwan, idx, on);
}

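/* Process a command response from the modem and wake up the initiator of
 * the blocking command.
 */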
static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
					      union mux_cmd_param param,
					      __le32 command_type, u8 if_id,
					      __le32 transaction_id)
{
	struct mux_acb *acb = &ipc_mux->acb;

	switch (le32_to_cpu(command_type)) {
	case MUX_CMD_OPEN_SESSION_RESP:
	case MUX_CMD_CLOSE_SESSION_RESP:
		/* Resume the control application. */
		acb->got_param = param;
		break;

	case MUX_LITE_CMD_FLOW_CTL_ACK:
		/* This command type is not expected as response for
		 * the aggregation version of the protocol. So return
		 * non-zero.
		 */
		if (ipc_mux->protocol != MUX_LITE)
			return -EINVAL;

		dev_dbg(ipc_mux->dev, "if_id %u FLOW_CTL_ACK %u received",
			if_id, le32_to_cpu(transaction_id));
		break;

	case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK:
		/* This command type is not expected as response for
		 * the Lite version of the protocol. So return
		 * non-zero.
		 */
		if (ipc_mux->protocol == MUX_LITE)
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	acb->wanted_response = MUX_CMD_INVALID;
	acb->got_response = le32_to_cpu(command_type);
	complete(&ipc_mux->channel->ul_sem);

	return 0;
}

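/* Handle an unsolicited command from the modem, e.g. per-session flow
 * control or a link status report.
 */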
static int ipc_mux_dl_cmds_decode_process(struct iosm_mux *ipc_mux,
					  union mux_cmd_param *param,
					  __le32 command_type, u8 if_id,
					  __le16 cmd_len, int size)
{
	struct mux_session *session;
	struct hrtimer *adb_timer;

	dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
		if_id, le32_to_cpu(command_type));

	switch (le32_to_cpu(command_type)) {
	case MUX_LITE_CMD_FLOW_CTL:
	case IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE:

		if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
			dev_err(ipc_mux->dev, "if_id [%d] not valid",
				if_id);
			return -EINVAL; /* No session interface id. */
		}

		session = &ipc_mux->session[if_id];
		adb_timer = &ipc_mux->imem->adb_timer;

		if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
			/* Backward Compatibility */
			if (cmd_len == cpu_to_le16(size))
				session->flow_ctl_mask =
					le32_to_cpu(param->flow_ctl.mask);
			else
				session->flow_ctl_mask = ~0;
			/* if CP asks for FLOW CTRL Enable
			 * then set our internal flow control Tx flag
			 * to limit uplink session queueing
			 */
			session->net_tx_stop = true;

			/* We have to call Finish ADB here.
			 * Otherwise any already queued data
			 * will be sent to CP when ADB is full
			 * for some other sessions.
			 */
			if (ipc_mux->protocol == MUX_AGGREGATION) {
				ipc_mux_ul_adb_finish(ipc_mux);
				ipc_imem_hrtimer_stop(adb_timer);
			}

			session->flow_ctl_en_cnt++;
		} else if (param->flow_ctl.mask == 0) {
			/* Just reset the Flow control mask and let
			 * mux_flow_ctrl_low_thre_b take control on
			 * our internal Tx flag and enabling kernel
			 * flow control
			 */
			dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X",
				if_id, le32_to_cpu(param->flow_ctl.mask));
			/* Backward Compatibility */
			if (cmd_len == cpu_to_le16(size))
				session->flow_ctl_mask =
					le32_to_cpu(param->flow_ctl.mask);
			else
				session->flow_ctl_mask = 0;

			session->flow_ctl_dis_cnt++;
		} else {
			break;
		}

		ipc_mux->acc_adb_size = 0;
		ipc_mux->acc_payload_size = 0;

		dev_dbg(ipc_mux->dev, "if_id[%u] FLOW CTRL 0x%08X", if_id,
			le32_to_cpu(param->flow_ctl.mask));
		break;

	case MUX_LITE_CMD_LINK_STATUS_REPORT:
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

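/* Decode and send the appropriate response to a command block. */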
static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
	struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
	__le32 trans_id = cmdh->transaction_id;
	int size;

	if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
					       cmdh->command_type, cmdh->if_id,
					       cmdh->transaction_id)) {
		/* Unable to decode a command response indicates the cmd_type
		 * may be a command instead of a response, so try to decode it.
		 */
		size = offsetof(struct mux_lite_cmdh, param) +
		       sizeof(cmdh->param.flow_ctl);
		if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
						    cmdh->command_type,
						    cmdh->if_id,
						    cmdh->cmd_len, size)) {
			/* The decoded command may need a response. Give the
			 * response according to the command type.
			 */
			union mux_cmd_param *mux_cmd = NULL;
			size_t size = 0;
			u32 cmd = MUX_LITE_CMD_LINK_STATUS_REPORT_RESP;

			if (cmdh->command_type ==
			    cpu_to_le32(MUX_LITE_CMD_LINK_STATUS_REPORT)) {
				mux_cmd = &cmdh->param;
				mux_cmd->link_status_resp.response =
					cpu_to_le32(MUX_CMD_RESP_SUCCESS);
				/* response field is u32 */
				size = sizeof(u32);
			} else if (cmdh->command_type ==
				   cpu_to_le32(MUX_LITE_CMD_FLOW_CTL)) {
				cmd = MUX_LITE_CMD_FLOW_CTL_ACK;
			} else {
				return;
			}

			if (ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
						     le32_to_cpu(trans_id),
						     mux_cmd, size, false,
						     true))
				dev_err(ipc_mux->dev,
					"if_id %d: cmd send failed",
					cmdh->if_id);
		}
	}
}

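/* Pass the DL packet to the netif layer. */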
static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
			       struct iosm_wwan *wwan, u32 offset,
			       u8 service_class, struct sk_buff *skb)
{
	struct sk_buff *dest_skb = skb_clone(skb, GFP_ATOMIC);

	if (!dest_skb)
		return -ENOMEM;

	skb_pull(dest_skb, offset);
	skb_set_tail_pointer(dest_skb, dest_skb->len);

	dest_skb->priority = service_class;

	return ipc_wwan_receive(wwan, dest_skb, false, if_id);
}

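/* Decode Flow Credit Table in the block */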
static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
				   unsigned char *block)
{
	struct ipc_mem_lite_gen_tbl *fct = (struct ipc_mem_lite_gen_tbl *)block;
	struct iosm_wwan *wwan;
	int ul_credits;
	int if_id;

	if (fct->vfl_length != sizeof(fct->vfl.nr_of_bytes)) {
		dev_err(ipc_mux->dev, "unexpected FCT length: %d",
			fct->vfl_length);
		return;
	}

	if_id = fct->if_id;
	if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
		dev_err(ipc_mux->dev, "not supported if_id: %d", if_id);
		return;
	}

	/* Is the session active ? */
	if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
	wwan = ipc_mux->session[if_id].wwan;
	if (!wwan) {
		dev_err(ipc_mux->dev, "session Net ID is NULL");
		return;
	}

	ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);

	dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
		if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);

	/* Update the Flow Credit information from ADB */
	ipc_mux->session[if_id].ul_flow_credits += ul_credits;

	/* Check whether the TX can be started */
	if (ipc_mux->session[if_id].ul_flow_credits > 0) {
		ipc_mux->session[if_id].net_tx_stop = false;
		ipc_mux_netif_tx_flowctrl(&ipc_mux->session[if_id],
					  ipc_mux->session[if_id].if_id, false);
	}
}

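/* Decode non-aggregated datagram */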
static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
				   struct sk_buff *skb)
{
	u32 pad_len, packet_offset;
	struct iosm_wwan *wwan;
	struct mux_adgh *adgh;
	u8 *block = skb->data;
	int rc = 0;
	u8 if_id;

	adgh = (struct mux_adgh *)block;

	if (adgh->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH)) {
		dev_err(ipc_mux->dev, "invalid ADGH signature received");
		return;
	}

	if_id = adgh->if_id;
	if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
		dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id);
		return;
	}

	/* Is the session active ? */
	if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
	wwan = ipc_mux->session[if_id].wwan;
	if (!wwan) {
		dev_err(ipc_mux->dev, "session Net ID is NULL");
		return;
	}

	/* Compute the head padding for this session: the negotiated DL head
	 * pad length less the Ethernet offset. The datagram then starts
	 * behind the ADGH plus this padding.
	 */
	pad_len = ipc_mux->session[if_id].dl_head_pad_len -
		  IPC_MEM_DL_ETH_OFFSET;
	packet_offset = sizeof(*adgh) + pad_len;

	if_id += ipc_mux->wwan_q_offset;

	/* Pass the packet to the netif layer */
	rc = ipc_mux_net_receive(ipc_mux, if_id, wwan, packet_offset,
				 adgh->service_class, skb);
	if (rc) {
		dev_err(ipc_mux->dev, "mux adgh decoding error");
		return;
	}
	ipc_mux->session[if_id].flush = 1;
}

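/* Decode a command found in an aggregated command block and send the
 * corresponding response.
 */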
static void ipc_mux_dl_acbcmd_decode(struct iosm_mux *ipc_mux,
				     struct mux_cmdh *cmdh, int size)
{
	u32 link_st = IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP;
	u32 fctl_dis = IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE;
	u32 fctl_ena = IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE;
	u32 fctl_ack = IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK;
	union mux_cmd_param *cmd_p = NULL;
	u32 cmd = link_st;
	u32 trans_id;

	if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
					    cmdh->command_type, cmdh->if_id,
					    cmdh->cmd_len, size)) {
		size = 0;
		if (cmdh->command_type == cpu_to_le32(link_st)) {
			cmd_p = &cmdh->param;
			cmd_p->link_status_resp.response =
				cpu_to_le32(MUX_CMD_RESP_SUCCESS);
		} else if ((cmdh->command_type == cpu_to_le32(fctl_ena)) ||
			   (cmdh->command_type == cpu_to_le32(fctl_dis))) {
			cmd = fctl_ack;
		} else {
			return;
		}
		trans_id = le32_to_cpu(cmdh->transaction_id);
		ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
					 trans_id, cmd_p, size, false, true);
	}
}

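/* Decode an aggregated command block. */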
static void ipc_mux_dl_acb_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
	struct mux_acbh *acbh;
	struct mux_cmdh *cmdh;
	u32 next_cmd_index;
	u8 *block;
	int size;

	acbh = (struct mux_acbh *)(skb->data);
	block = (u8 *)(skb->data);

	next_cmd_index = le32_to_cpu(acbh->first_cmd_index);
	next_cmd_index = array_index_nospec(next_cmd_index,
					    sizeof(struct mux_cmdh));

	while (next_cmd_index != 0) {
		cmdh = (struct mux_cmdh *)&block[next_cmd_index];
		next_cmd_index = le32_to_cpu(cmdh->next_cmd_index);
		if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
						       cmdh->command_type,
						       cmdh->if_id,
						       cmdh->transaction_id)) {
			size = offsetof(struct mux_cmdh, param) +
			       sizeof(cmdh->param.flow_ctl);
			ipc_mux_dl_acbcmd_decode(ipc_mux, cmdh, size);
		}
	}
}

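/* Process the datagrams of one session in the ADB and pass each one to the
 * netif layer.
 */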
static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
			     struct mux_adth_dg *dg, struct sk_buff *skb,
			     int if_id, int nr_of_dg)
{
	u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len;
	u32 packet_offset, i, rc;

	for (i = 0; i < nr_of_dg; i++, dg++) {
		if (le32_to_cpu(dg->datagram_index)
		    < sizeof(struct mux_adbh))
			goto dg_error;

		/* Is the packet inside of the ADB */
		if (le32_to_cpu(dg->datagram_index) >=
		    le32_to_cpu(adbh->block_length)) {
			goto dg_error;
		} else {
			packet_offset =
				le32_to_cpu(dg->datagram_index) +
				dl_head_pad_len;
			/* Pass the packet to the netif layer. */
			rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan,
						 packet_offset,
						 dg->service_class,
						 skb);
			if (rc)
				goto dg_error;
		}
	}
	return 0;
dg_error:
	return -1;
}

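/* Decode an aggregated data block. */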
static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
			      struct sk_buff *skb)
{
	struct mux_adth_dg *dg;
	struct iosm_wwan *wwan;
	struct mux_adbh *adbh;
	struct mux_adth *adth;
	int nr_of_dg, if_id;
	u32 adth_index;
	u8 *block;

	block = skb->data;
	adbh = (struct mux_adbh *)block;

	/* Process the aggregated datagram tables. */
	adth_index = le32_to_cpu(adbh->first_table_index);

	/* Has CP sent an empty ADB ? */
	if (adth_index < 1) {
		dev_err(ipc_mux->dev, "unexpected empty ADB");
		goto adb_decode_err;
	}

	/* Loop through mixed session tables. */
	while (adth_index) {
		/* Get the reference to the table header. */
		adth = (struct mux_adth *)(block + adth_index);

		/* Get the interface id and map it to the netif id. */
		if_id = adth->if_id;
		if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
			goto adb_decode_err;

		if_id = array_index_nospec(if_id,
					   IPC_MEM_MUX_IP_SESSION_ENTRIES);

		/* Is the session active ? */
		wwan = ipc_mux->session[if_id].wwan;
		if (!wwan)
			goto adb_decode_err;

		/* Consistency checks for the aggregated datagram table. */
		if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH))
			goto adb_decode_err;

		if (le16_to_cpu(adth->table_length) < (sizeof(struct mux_adth) -
				sizeof(struct mux_adth_dg)))
			goto adb_decode_err;

		/* Calculate the number of datagrams. */
		nr_of_dg = (le16_to_cpu(adth->table_length) -
			    sizeof(struct mux_adth) +
			    sizeof(struct mux_adth_dg)) /
			    sizeof(struct mux_adth_dg);

		/* Is the datagram table empty ? */
		if (nr_of_dg < 1) {
			dev_err(ipc_mux->dev,
				"adthidx=%u,nr_of_dg=%d,next_tblidx=%u",
				adth_index, nr_of_dg,
				le32_to_cpu(adth->next_table_index));

			/* Move to the next aggregated datagram table. */
			adth_index = le32_to_cpu(adth->next_table_index);
			continue;
		}

		/* New aggregated datagram table. */
		dg = &adth->dg;
		if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id,
				      nr_of_dg) < 0)
			goto adb_decode_err;

		/* Mark the session for the final flush. */
		ipc_mux->session[if_id].flush = 1;

		/* Move to the next aggregated datagram table. */
		adth_index = le32_to_cpu(adth->next_table_index);
	}

adb_decode_err:
	return;
}

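/**
 * ipc_mux_dl_decode - Route the DL packet through the IP MUX layer
 *                     depending on Signature.
 * @ipc_mux: pointer to MUX data-struct
 * @skb:     pointer to ipc_skb.
 */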
void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
	u32 signature;

	if (!skb->data)
		return;

	/* Decode the MUX header type. */
	signature = le32_to_cpup((__le32 *)skb->data);

	switch (signature) {
	case IOSM_AGGR_MUX_SIG_ADBH:	/* Aggregated Data Block Header */
		mux_dl_adb_decode(ipc_mux, skb);
		break;
	case IOSM_AGGR_MUX_SIG_ADGH:	/* Aggregated Datagram Header */
		ipc_mux_dl_adgh_decode(ipc_mux, skb);
		break;
	case MUX_SIG_FCTH:
		ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
		break;
	case IOSM_AGGR_MUX_SIG_ACBH:	/* Aggregated Command Block Header */
		ipc_mux_dl_acb_decode(ipc_mux, skb);
		break;
	case MUX_SIG_CMDH:
		ipc_mux_dl_cmd_decode(ipc_mux, skb);
		break;

	default:
		dev_err(ipc_mux->dev, "invalid MUX block signature");
	}

	ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
}

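/* Allocate an skb for the given UL MUX block type and initialize the
 * block descriptor.
 */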
static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
				struct mux_adb *ul_adb, u32 type)
{
	/* Take the first element of the free list. */
	struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
	u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES;
	u32 *next_tb_id;
	int qlt_size;
	u32 if_id;

	if (!skb)
		return -EBUSY; /* Wait for a free ADB skb. */

	/* Mark it as UL ADB to select the right free operation. */
	IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;

	switch (type) {
	case IOSM_AGGR_MUX_SIG_ADBH:
		/* Save the ADB memory settings. */
		ul_adb->dest_skb = skb;
		ul_adb->buf = skb->data;
		ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE;

		/* Reset the statistic counters. */
		ul_adb->if_cnt = 0;
		ul_adb->payload_size = 0;
		ul_adb->dg_cnt_total = 0;

		/* Initialize the ADBH. */
		ul_adb->adbh = (struct mux_adbh *)ul_adb->buf;
		memset(ul_adb->adbh, 0, sizeof(struct mux_adbh));
		ul_adb->adbh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADBH);
		ul_adb->adbh->block_length =
			cpu_to_le32(sizeof(struct mux_adbh));
		next_tb_id = (unsigned int *)&ul_adb->adbh->first_table_index;
		ul_adb->next_table_index = next_tb_id;

		/* Clear the local copy of the DGs for the new ADB. */
		memset(ul_adb->dg, 0, sizeof(ul_adb->dg));

		/* Clear the DG count and QLT updated status for the new ADB. */
		for (if_id = 0; if_id < no_if; if_id++) {
			ul_adb->dg_count[if_id] = 0;
			ul_adb->qlt_updated[if_id] = 0;
		}
		break;

	case IOSM_AGGR_MUX_SIG_ADGH:
		/* Save the ADGH memory settings. */
		ul_adb->dest_skb = skb;
		ul_adb->buf = skb->data;
		ul_adb->size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;

		/* Reset the statistic counters. */
		ul_adb->if_cnt = 0;
		ul_adb->payload_size = 0;
		ul_adb->dg_cnt_total = 0;

		ul_adb->adgh = (struct mux_adgh *)skb->data;
		memset(ul_adb->adgh, 0, sizeof(struct mux_adgh));
		break;

	case MUX_SIG_QLTH:
		qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
			   (MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl));

		if (qlt_size > IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE) {
			dev_err(ipc_mux->dev,
				"can't support. QLT size:%d SKB size: %d",
				qlt_size, IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE);
			return -ERANGE;
		}

		ul_adb->qlth_skb = skb;
		memset((ul_adb->qlth_skb)->data, 0, qlt_size);
		skb_put(skb, qlt_size);
		break;
	}

	return 0;
}

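/* Finish an ADGH: set the final length, queue the skb on the channel UL
 * list and update the flow-control accounting.
 */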
static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
{
	struct mux_adb *ul_adb = &ipc_mux->ul_adb;
	u16 adgh_len;
	long long bytes;
	char *str;

	if (!ul_adb->dest_skb) {
		dev_err(ipc_mux->dev, "no dest skb");
		return;
	}

	adgh_len = le16_to_cpu(ul_adb->adgh->length);
	skb_put(ul_adb->dest_skb, adgh_len);
	skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
	ul_adb->dest_skb = NULL;

	if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
		struct mux_session *session;

		session = &ipc_mux->session[ul_adb->adgh->if_id];
		str = "available_credits";
		bytes = (long long)session->ul_flow_credits;
	} else {
		str = "pend_bytes";
		bytes = ipc_mux->ul_data_pend_bytes;
		ipc_mux->ul_data_pend_bytes += adgh_len;
	}

	dev_dbg(ipc_mux->dev, "UL ADGH: size=%u, if_id=%d, payload=%d, %s=%lld",
		adgh_len, ul_adb->adgh->if_id, ul_adb->payload_size,
		str, bytes);
}

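/* Append the ADTH and QLT tables of all sessions to the UL ADB. */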
static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux,
				   struct mux_adb *ul_adb, int *out_offset)
{
	int i, qlt_size, offset = *out_offset;
	struct mux_qlth *p_adb_qlt;
	struct mux_adth_dg *dg;
	struct mux_adth *adth;
	u16 adth_dg_size;
	u32 *next_tb_id;

	qlt_size = offsetof(struct mux_qlth, ql) +
		   MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);

	for (i = 0; i < ipc_mux->nr_sessions; i++) {
		if (ul_adb->dg_count[i] > 0) {
			adth_dg_size = offsetof(struct mux_adth, dg) +
				       ul_adb->dg_count[i] * sizeof(*dg);

			*ul_adb->next_table_index = offset;
			adth = (struct mux_adth *)&ul_adb->buf[offset];
			next_tb_id = (unsigned int *)&adth->next_table_index;
			ul_adb->next_table_index = next_tb_id;
			offset += adth_dg_size;
			adth->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH);
			adth->if_id = i;
			adth->table_length = cpu_to_le16(adth_dg_size);
			adth_dg_size -= offsetof(struct mux_adth, dg);
			memcpy(&adth->dg, ul_adb->dg[i], adth_dg_size);
			ul_adb->if_cnt++;
		}

		if (ul_adb->qlt_updated[i]) {
			*ul_adb->next_table_index = offset;
			p_adb_qlt = (struct mux_qlth *)&ul_adb->buf[offset];
			ul_adb->next_table_index =
				(u32 *)&p_adb_qlt->next_table_index;
			memcpy(p_adb_qlt, ul_adb->pp_qlt[i], qlt_size);
			offset += qlt_size;
		}
	}
	*out_offset = offset;
}

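/**
 * ipc_mux_ul_adb_finish - Add the TD of the aggregated session packets to TDR.
 * @ipc_mux:            pointer to MUX instance data
 */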
void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux)
{
	bool ul_data_pend = false;
	struct mux_adb *ul_adb;
	unsigned long flags;
	int offset;

	ul_adb = &ipc_mux->ul_adb;
	if (!ul_adb->dest_skb)
		return;

	offset = *ul_adb->next_table_index;
	ipc_mux_ul_encode_adth(ipc_mux, ul_adb, &offset);
	ul_adb->adbh->block_length = cpu_to_le32(offset);

	if (le32_to_cpu(ul_adb->adbh->block_length) > ul_adb->size) {
		ul_adb->dest_skb = NULL;
		return;
	}

	*ul_adb->next_table_index = 0;
	ul_adb->adbh->sequence_nr = cpu_to_le16(ipc_mux->adb_tx_sequence_nr++);
	skb_put(ul_adb->dest_skb, le32_to_cpu(ul_adb->adbh->block_length));

	spin_lock_irqsave(&(&ipc_mux->channel->ul_list)->lock, flags);
	__skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
	spin_unlock_irqrestore(&(&ipc_mux->channel->ul_list)->lock, flags);

	ul_adb->dest_skb = NULL;

	ul_data_pend = ipc_imem_ul_write_td(ipc_mux->imem);

	/* Delay the doorbell irq */
	if (ul_data_pend)
		ipc_imem_td_update_timer_start(ipc_mux->imem);

	ipc_mux->acc_adb_size += le32_to_cpu(ul_adb->adbh->block_length);
	ipc_mux->acc_payload_size += ul_adb->payload_size;
	ipc_mux->ul_data_pend_bytes += ul_adb->payload_size;
}

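/* Allocate a new UL ADB skb if there is no pending one. */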
static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
				    struct mux_adb *adb, int *size_needed,
				    u32 type)
{
	bool ret_val = false;
	int status;

	if (!adb->dest_skb) {
		/* Allocate memory for the ADB including the
		 * datagram table header.
		 */
		status = ipc_mux_ul_skb_alloc(ipc_mux, adb, type);
		if (status)
			/* Is a pending ADB available ? */
			ret_val = true; /* None. */

		/* Reset the needed size only for new ADB memory. */
		*size_needed = 0;
	}

	return ret_val;
}

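/* Informs the network stack to stop sending further packets for all opened
 * sessions.
 */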
static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
{
	struct mux_session *session;
	int idx;

	for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
		session = &ipc_mux->session[idx];

		if (!session->wwan)
			continue;

		session->net_tx_stop = true;
	}
}

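/* Sends Queue Level Table of all opened sessions */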
static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
{
	struct ipc_mem_lite_gen_tbl *qlt;
	struct mux_session *session;
	bool qlt_updated = false;
	int i;
	int qlt_size;

	if (!ipc_mux->initialized || ipc_mux->state != MUX_S_ACTIVE)
		return qlt_updated;

	qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
		   MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);

	for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
		session = &ipc_mux->session[i];

		if (!session->wwan || session->flow_ctl_mask)
			continue;

		if (ipc_mux_ul_skb_alloc(ipc_mux, &ipc_mux->ul_adb,
					 MUX_SIG_QLTH)) {
			dev_err(ipc_mux->dev,
				"no reserved mem to send QLT of if_id: %d", i);
			break;
		}

		/* Prepare the QLT with the latest queue level. */
		qlt = (struct ipc_mem_lite_gen_tbl *)(ipc_mux->ul_adb.qlth_skb)
			      ->data;
		qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
		qlt->length = cpu_to_le16(qlt_size);
		qlt->if_id = i;
		qlt->vfl_length = MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
		qlt->reserved[0] = 0;
		qlt->reserved[1] = 0;

		qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);

		/* Add QLT to the transfer list. */
		skb_queue_tail(&ipc_mux->channel->ul_list,
			       ipc_mux->ul_adb.qlth_skb);

		qlt_updated = true;
		ipc_mux->ul_adb.qlth_skb = NULL;
	}

	if (qlt_updated)
		/* Updates the TDs with ul_list */
		(void)ipc_imem_ul_write_td(ipc_mux->imem);

	return qlt_updated;
}

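/* Checks the available credits for the specified session and returns the
 * number of packets for which credits are available.
 */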
static int ipc_mux_ul_bytes_credits_check(struct iosm_mux *ipc_mux,
					  struct mux_session *session,
					  struct sk_buff_head *ul_list,
					  int max_nr_of_pkts)
{
	int pkts_to_send = 0;
	struct sk_buff *skb;
	int credits = 0;

	if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
		credits = session->ul_flow_credits;
		if (credits <= 0) {
			dev_dbg(ipc_mux->dev,
				"FC::if_id[%d] Insuff.Credits/Qlen:%d/%u",
				session->if_id, session->ul_flow_credits,
				session->ul_list.qlen);
			return 0;
		}
	} else {
		credits = IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B -
			  ipc_mux->ul_data_pend_bytes;
		if (credits <= 0) {
			ipc_mux_stop_tx_for_all_sessions(ipc_mux);

			dev_dbg(ipc_mux->dev,
				"if_id[%d] encod. fail Bytes: %llu, thresh: %d",
				session->if_id, ipc_mux->ul_data_pend_bytes,
				IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B);
			return 0;
		}
	}

	/* Check if there are enough credits/bytes available to send the
	 * requested max_nr_of_pkts. Otherwise restrict the number of packets
	 * to the available credits.
	 */
	skb_queue_walk(ul_list, skb) {
		if (!(credits >= skb->len && pkts_to_send < max_nr_of_pkts))
			break;
		credits -= skb->len;
		pkts_to_send++;
	}

	return pkts_to_send;
}

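/* Encode the UL IP packet according to MUX Lite spec. */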
static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
				  struct mux_session *session,
				  struct sk_buff_head *ul_list,
				  struct mux_adb *adb, int nr_of_pkts)
{
	int offset = sizeof(struct mux_adgh);
	int adb_updated = -EINVAL;
	struct sk_buff *src_skb;
	int aligned_size = 0;
	int nr_of_skb = 0;
	u32 pad_len = 0;

	/* Re-calculate the number of packets depending on the number of bytes
	 * to be processed/available credits.
	 */
	nr_of_pkts = ipc_mux_ul_bytes_credits_check(ipc_mux, session, ul_list,
						    nr_of_pkts);

	/* If the calculated nr_of_pkts from the available credits is <= 0
	 * then nothing to do.
	 */
	if (nr_of_pkts <= 0)
		return 0;

	/* Read the configured UL head_pad_length for the session. */
	if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
		pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;

	/* Process the pending UL packets, one ADGH per packet, as long as
	 * credits and buffers are available.
	 */
	while (nr_of_pkts > 0) {
		/* get destination skb allocated */
		if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
					    IOSM_AGGR_MUX_SIG_ADGH)) {
			dev_err(ipc_mux->dev, "no reserved memory for ADGH");
			return -ENOMEM;
		}

		/* Peek at the head of the list. */
		src_skb = skb_peek(ul_list);
		if (!src_skb) {
			dev_err(ipc_mux->dev,
				"skb peek return NULL with count : %d",
				nr_of_pkts);
			break;
		}

		/* Calculate the memory value. */
		aligned_size = ALIGN((pad_len + src_skb->len), 4);

		ipc_mux->size_needed = sizeof(struct mux_adgh) + aligned_size;

		if (ipc_mux->size_needed > adb->size) {
			dev_dbg(ipc_mux->dev, "size needed %d, adgh size %d",
				ipc_mux->size_needed, adb->size);
			/* Return 1 if any IP packet is added to the transfer
			 * list.
			 */
			return nr_of_skb ? 1 : 0;
		}

		/* Copy the datagram into the ADGH behind the head padding. */
		memcpy(adb->buf + offset + pad_len, src_skb->data,
		       src_skb->len);

		adb->adgh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH);
		adb->adgh->if_id = session_id;
		adb->adgh->length =
			cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
				    src_skb->len);
		adb->adgh->service_class = src_skb->priority;
		adb->adgh->next_count = --nr_of_pkts;
		adb->dg_cnt_total++;
		adb->payload_size += src_skb->len;

		if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS)
			/* Decrement the credit value as we are processing the
			 * datagram from the UL list.
			 */
			session->ul_flow_credits -= src_skb->len;

		/* Remove the processed element and free it. */
		src_skb = skb_dequeue(ul_list);
		dev_kfree_skb(src_skb);
		nr_of_skb++;

		ipc_mux_ul_adgh_finish(ipc_mux);
	}

	if (nr_of_skb) {
		/* Send the QLT info to the modem if the pending bytes crossed
		 * the watermark in case of MUX Lite.
		 */
		if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS ||
		    ipc_mux->ul_data_pend_bytes >=
		    IPC_MEM_MUX_UL_FLOWCTRL_LOW_B)
			adb_updated = ipc_mux_lite_send_qlt(ipc_mux);
		else
			adb_updated = 1;

		/* Updates the TDs with ul_list */
		(void)ipc_imem_ul_write_td(ipc_mux->imem);
	}

	return adb_updated;
}

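/**
 * ipc_mux_ul_adb_update_ql - Adds Queue Level Table and Queue Level to ADB
 * @ipc_mux:            pointer to MUX instance data
 * @p_adb:              pointer to UL aggregated data block
 * @session_id:         session id
 * @qlth_n_ql_size:     Length (in bytes) of the datagram table
 * @ul_list:            pointer to skb buffer head
 */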
void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
			      int session_id, int qlth_n_ql_size,
			      struct sk_buff_head *ul_list)
{
	int qlevel = ul_list->qlen;
	struct mux_qlth *p_qlt;

	p_qlt = (struct mux_qlth *)p_adb->pp_qlt[session_id];

	/* Initialize the QLTH if not been done. */
	if (p_adb->qlt_updated[session_id] == 0) {
		p_qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
		p_qlt->if_id = session_id;
		p_qlt->table_length = cpu_to_le16(qlth_n_ql_size);
		p_qlt->reserved = 0;
		p_qlt->reserved2 = 0;
	}

	/* Update the Queue Level information always. */
	p_qlt->ql.nr_of_bytes = cpu_to_le32(qlevel);
	p_adb->qlt_updated[session_id] = 1;
}

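/* Finish the current ADB (update its queue level table and queue it) and
 * start a new one, recalculating the needed size for the pending datagram.
 */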
static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux,
				      int session_id,
				      struct sk_buff_head *ul_list,
				      struct mux_adth_dg *dg,
				      int aligned_size,
				      u32 qlth_n_ql_size,
				      struct mux_adb *adb,
				      struct sk_buff *src_skb)
{
	ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
				 qlth_n_ql_size, ul_list);
	ipc_mux_ul_adb_finish(ipc_mux);
	if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
				    IOSM_AGGR_MUX_SIG_ADBH)) {
		dev_kfree_skb(src_skb);
		return -ENOMEM;
	}
	ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length);

	/* Reserve the size for the datagram table header, the queue level
	 * table and the datagram entry.
	 */
	ipc_mux->size_needed += offsetof(struct mux_adth, dg);
	ipc_mux->size_needed += qlth_n_ql_size;
	ipc_mux->size_needed += sizeof(*dg) + aligned_size;
	return 0;
}

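/* Copy the datagrams of one session from the UL list into the ADB and fill
 * the datagram table entries.
 */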
static int mux_ul_dg_encode(struct iosm_mux *ipc_mux, struct mux_adb *adb,
			    struct mux_adth_dg *dg,
			    struct sk_buff_head *ul_list,
			    struct sk_buff *src_skb, int session_id,
			    int pkt_to_send, u32 qlth_n_ql_size,
			    int *out_offset, int head_pad_len)
{
	int aligned_size;
	int offset = *out_offset;
	unsigned long flags;
	int nr_of_skb = 0;

	while (pkt_to_send > 0) {
		/* Peek at the head of the list. */
		src_skb = skb_peek(ul_list);
		if (!src_skb) {
			dev_err(ipc_mux->dev,
				"skb peek return NULL with count : %d",
				pkt_to_send);
			return -1;
		}
		aligned_size = ALIGN((head_pad_len + src_skb->len), 4);
		ipc_mux->size_needed += sizeof(*dg) + aligned_size;

		if (ipc_mux->size_needed > adb->size ||
		    ((ipc_mux->size_needed + ipc_mux->ul_data_pend_bytes) >=
		     IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B)) {
			*adb->next_table_index = offset;
			if (mux_ul_dg_update_tbl_index(ipc_mux, session_id,
						       ul_list, dg,
						       aligned_size,
						       qlth_n_ql_size, adb,
						       src_skb) < 0)
				return -ENOMEM;
			nr_of_skb = 0;
			offset = le32_to_cpu(adb->adbh->block_length);
			/* Load the pointer to the next available datagram
			 * entry.
			 */
			dg = adb->dg[session_id] + adb->dg_count[session_id];
		}

		/* Copy the datagram into the ADB behind the head padding. */
		memcpy(adb->buf + offset + head_pad_len,
		       src_skb->data, src_skb->len);

		/* Setup the datagram entry. */
		dg->datagram_index = cpu_to_le32(offset);
		dg->datagram_length = cpu_to_le16(src_skb->len + head_pad_len);
		dg->service_class = src_skb->priority;
		dg->reserved = 0;
		adb->dg_cnt_total++;
		adb->payload_size += le16_to_cpu(dg->datagram_length);
		dg++;
		adb->dg_count[session_id]++;
		offset += aligned_size;

		/* Remove the processed element and free it. */
		spin_lock_irqsave(&ul_list->lock, flags);
		src_skb = __skb_dequeue(ul_list);
		spin_unlock_irqrestore(&ul_list->lock, flags);

		dev_kfree_skb(src_skb);
		nr_of_skb++;
		pkt_to_send--;
	}
	*out_offset = offset;
	return nr_of_skb;
}

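/* Encode the UL IP packet according to the aggregation protocol. */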
static int mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id,
			     struct mux_session *session,
			     struct sk_buff_head *ul_list, struct mux_adb *adb,
			     int pkt_to_send)
{
	int adb_updated = -EINVAL;
	int head_pad_len, offset;
	struct sk_buff *src_skb = NULL;
	struct mux_adth_dg *dg;
	u32 qlth_n_ql_size;

	/* If any of the opened sessions has set Flow Control ON then limit
	 * the UL data to the high watermark of pending bytes.
	 */
	if (ipc_mux->ul_data_pend_bytes >=
	    IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) {
		ipc_mux_stop_tx_for_all_sessions(ipc_mux);
		return adb_updated;
	}

	qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
			 MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
	head_pad_len = session->ul_head_pad_len;

	if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
		head_pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;

	if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
				    IOSM_AGGR_MUX_SIG_ADBH))
		return -ENOMEM;

	offset = le32_to_cpu(adb->adbh->block_length);

	if (ipc_mux->size_needed == 0)
		ipc_mux->size_needed = offset;

	/* Calculate the size needed for the ADTH, QLTH and QL. */
	if (adb->dg_count[session_id] == 0) {
		ipc_mux->size_needed += offsetof(struct mux_adth, dg);
		ipc_mux->size_needed += qlth_n_ql_size;
	}

	dg = adb->dg[session_id] + adb->dg_count[session_id];

	if (mux_ul_dg_encode(ipc_mux, adb, dg, ul_list, src_skb,
			     session_id, pkt_to_send, qlth_n_ql_size, &offset,
			     head_pad_len) > 0) {
		adb_updated = 1;
		*adb->next_table_index = offset;
		ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
					 qlth_n_ql_size, ul_list);
		adb->adbh->block_length = cpu_to_le32(offset);
	}

	return adb_updated;
}

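/* Walk all sessions round robin and encode pending UL data. */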
bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
{
	struct sk_buff_head *ul_list;
	struct mux_session *session;
	int updated = 0;
	int session_id;
	int dg_n;
	int i;

	if (!ipc_mux || ipc_mux->state != MUX_S_ACTIVE ||
	    ipc_mux->adb_prep_ongoing)
		return false;

	ipc_mux->adb_prep_ongoing = true;

	for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
		session_id = ipc_mux->rr_next_session;
		session = &ipc_mux->session[session_id];

		/* Go to the next session id because of rr_next_session aging. */
		ipc_mux->rr_next_session++;
		if (ipc_mux->rr_next_session >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
			ipc_mux->rr_next_session = 0;

		/* Check the session state and UL pending packets. */
		if (!session->wwan || session->flow_ctl_mask ||
		    session->net_tx_stop)
			continue;

		ul_list = &session->ul_list;

		/* Limit the number of datagrams to the table capacity. */
		dg_n = skb_queue_len(ul_list);
		if (dg_n > MUX_MAX_UL_DG_ENTRIES)
			dg_n = MUX_MAX_UL_DG_ENTRIES;

		if (dg_n == 0)
			/* Nothing to do for this ipc_mux session
			 * -> try the next session id.
			 */
			continue;
		if (ipc_mux->protocol == MUX_LITE)
			updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id,
							 session, ul_list,
							 &ipc_mux->ul_adb,
							 dg_n);
		else
			updated = mux_ul_adb_encode(ipc_mux, session_id,
						    session, ul_list,
						    &ipc_mux->ul_adb,
						    dg_n);
	}

	ipc_mux->adb_prep_ongoing = false;
	return updated == 1;
}

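/* Calculate the payload size carried by an encoded ADB by walking its
 * datagram tables.
 */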
static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux,
					struct mux_adbh *p_adbh)
{
	struct mux_adth_dg *dg;
	struct mux_adth *adth;
	u32 payload_size = 0;
	u32 next_table_idx;
	int nr_of_dg, i;

	/* Process the aggregated datagram tables. */
	next_table_idx = le32_to_cpu(p_adbh->first_table_index);

	if (next_table_idx < sizeof(struct mux_adbh)) {
		dev_err(ipc_mux->dev, "unexpected empty ADB");
		return payload_size;
	}

	while (next_table_idx != 0) {
		/* Get the reference to the table header. */
		adth = (struct mux_adth *)((u8 *)p_adbh + next_table_idx);

		if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) {
			nr_of_dg = (le16_to_cpu(adth->table_length) -
				    sizeof(struct mux_adth) +
				    sizeof(struct mux_adth_dg)) /
				    sizeof(struct mux_adth_dg);

			if (nr_of_dg <= 0)
				return payload_size;

			dg = &adth->dg;

			for (i = 0; i < nr_of_dg; i++, dg++) {
				if (le32_to_cpu(dg->datagram_index) <
				    sizeof(struct mux_adbh)) {
					return payload_size;
				}
				payload_size +=
					le16_to_cpu(dg->datagram_length);
			}
		}
		next_table_idx = le32_to_cpu(adth->next_table_index);
	}

	return payload_size;
}

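/* Adjust the accounting of pending UL bytes for a block handed over to CP
 * and recycle the skb to the ADB free list.
 */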
void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
	union mux_type_header hr;
	u16 adgh_len;
	int payload;

	if (ipc_mux->protocol == MUX_LITE) {
		hr.adgh = (struct mux_adgh *)skb->data;
		adgh_len = le16_to_cpu(hr.adgh->length);
		if (hr.adgh->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH) &&
		    ipc_mux->ul_flow == MUX_UL)
			ipc_mux->ul_data_pend_bytes -= adgh_len;
	} else {
		hr.adbh = (struct mux_adbh *)(skb->data);
		payload = ipc_mux_get_payload_from_adb(ipc_mux, hr.adbh);
		ipc_mux->ul_data_pend_bytes -= payload;
	}

	if (ipc_mux->ul_flow == MUX_UL)
		dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
			ipc_mux->ul_data_pend_bytes);

	/* Reset the skb settings. */
	skb->tail = 0;
	skb->len = 0;

	/* Add the consumed ADB to the free list. */
	skb_queue_tail((&ipc_mux->ul_adb.free_list), skb);
}

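/* Task queue callback: encode pending UL data and arm the ADB timer and the
 * TD update timer if data is pending towards CP.
 */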
static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
					void *msg, size_t size)
{
	struct iosm_mux *ipc_mux = ipc_imem->mux;
	bool ul_data_pend = false;

	/* Add the session UL data to an ADB or ADGH. */
	ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
	if (ul_data_pend) {
		if (ipc_mux->protocol == MUX_AGGREGATION)
			ipc_imem_adb_timer_start(ipc_mux->imem);

		/* Delay the doorbell irq */
		ipc_imem_td_update_timer_start(ipc_mux->imem);
	}

	ipc_mux->ev_mux_net_transmit_pending = false;

	return 0;
}

int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
			      struct sk_buff *skb)
{
	struct mux_session *session = &ipc_mux->session[if_id];
	int ret = -EINVAL;

	if (ipc_mux->channel &&
	    ipc_mux->channel->state != IMEM_CHANNEL_ACTIVE) {
		dev_err(ipc_mux->dev,
			"channel state is not IMEM_CHANNEL_ACTIVE");
		goto out;
	}

	if (!session->wwan) {
		dev_err(ipc_mux->dev, "session net ID is NULL");
		ret = -EFAULT;
		goto out;
	}

	/* Session is under flow control.
	 * Check if the packet can be queued in the session list; if not,
	 * suspend net tx.
	 */
	if (skb_queue_len(&session->ul_list) >=
	    (session->net_tx_stop ?
	     IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
	     (IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
	      IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR))) {
		ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
		ret = -EBUSY;
		goto out;
	}

	/* Add the skb to the uplink skb accumulator. */
	skb_queue_tail(&session->ul_list, skb);

	/* Inform the IPC kthread to pass the uplink IP packets to CP. */
	if (!ipc_mux->ev_mux_net_transmit_pending) {
		ipc_mux->ev_mux_net_transmit_pending = true;
		ret = ipc_task_queue_send_task(ipc_mux->imem,
					       ipc_mux_tq_ul_trigger_encode, 0,
					       NULL, 0, false);
		if (ret)
			goto out;
	}
	dev_dbg(ipc_mux->dev, "mux ul if[%d] qlen=%d/%u, len=%d/%d, prio=%d",
		if_id, skb_queue_len(&session->ul_list), session->ul_list.qlen,
		skb->len, skb->truesize, skb->priority);
	ret = 0;
out:
	return ret;
}