// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include "iosm_ipc_mux_codec.h"

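/* At the beginning of the runtime phase, the IP MUX channel is created. */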
static int ipc_mux_channel_create(struct iosm_mux *ipc_mux)
{
	int channel_id;

	channel_id = ipc_imem_channel_alloc(ipc_mux->imem, ipc_mux->instance_id,
					    IPC_CTYPE_WWAN);

	if (channel_id < 0) {
		dev_err(ipc_mux->dev,
			"allocation of the MUX channel id failed");
		ipc_mux->state = MUX_S_ERROR;
		ipc_mux->event = MUX_E_NOT_APPLICABLE;
		goto no_channel;
	}

	/* Establish the MUX channel in blocking mode. */
	ipc_mux->channel = ipc_imem_channel_open(ipc_mux->imem, channel_id,
						 IPC_HP_NET_CHANNEL_INIT);

	if (!ipc_mux->channel) {
		dev_err(ipc_mux->dev, "ipc_imem_channel_open failed");
		ipc_mux->state = MUX_S_ERROR;
		ipc_mux->event = MUX_E_NOT_APPLICABLE;
		return -ENODEV; /* MUX channel is not available. */
	}

	/* Define the MUX active state properties. */
	ipc_mux->state = MUX_S_ACTIVE;
	ipc_mux->event = MUX_E_NO_ORDERS;

no_channel:
	return channel_id;
}

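/* Reset the session/interface id state. */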
static void ipc_mux_session_free(struct iosm_mux *ipc_mux, int if_id)
{
	struct mux_session *if_entry;

	if_entry = &ipc_mux->session[if_id];

	/* Reset the session state. */
	if_entry->wwan = NULL;
}

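/* Create and send the session open command; block until the modem (CP)
 * responds or the command times out.
 */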
static struct mux_cmd_open_session_resp *
ipc_mux_session_open_send(struct iosm_mux *ipc_mux, int if_id)
{
	struct mux_cmd_open_session_resp *open_session_resp;
	struct mux_acb *acb = &ipc_mux->acb;
	union mux_cmd_param param;

	/* Prepare the open_session command parameters. */
	param.open_session.flow_ctrl = 0;
	param.open_session.ipv4v6_hints = 0;
	param.open_session.reserved2 = 0;
	param.open_session.dl_head_pad_len = cpu_to_le32(IPC_MEM_DL_ETH_OFFSET);

	/* Send the command and block until CP responds or the command
	 * times out.
	 */
	acb->wanted_response = MUX_CMD_OPEN_SESSION_RESP;
	if (ipc_mux_dl_acb_send_cmds(ipc_mux, MUX_CMD_OPEN_SESSION, if_id, 0,
				     &param, sizeof(param.open_session), true,
				     false) ||
	    acb->got_response != MUX_CMD_OPEN_SESSION_RESP) {
		dev_err(ipc_mux->dev, "if_id %d: OPEN_SESSION send failed",
			if_id);
		return NULL;
	}

	open_session_resp = &ipc_mux->acb.got_param.open_session_resp;
	if (open_session_resp->response != cpu_to_le32(MUX_CMD_RESP_SUCCESS)) {
		dev_err(ipc_mux->dev,
			"if_id %d: session open failed, response=%d", if_id,
			le32_to_cpu(open_session_resp->response));
		return NULL;
	}

	return open_session_resp;
}

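/* Open an IP session and initialize its state. */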
static bool ipc_mux_session_open(struct iosm_mux *ipc_mux,
				 struct mux_session_open *session_open)
{
	struct mux_cmd_open_session_resp *open_session_resp;
	int if_id;

	/* Validate the requested session interface id. */
	if_id = le32_to_cpu(session_open->if_id);
	if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
		dev_err(ipc_mux->dev, "invalid interface id=%d", if_id);
		return false;
	}

	/* Create and send the session open command.
	 * It is a blocking call, until CP responds or the command times out.
	 */
	open_session_resp = ipc_mux_session_open_send(ipc_mux, if_id);
	if (!open_session_resp) {
		ipc_mux_session_free(ipc_mux, if_id);
		session_open->if_id = cpu_to_le32(-1);
		return false;
	}

	/* Initialize the uplink skb accumulator. */
	skb_queue_head_init(&ipc_mux->session[if_id].ul_list);

	ipc_mux->session[if_id].dl_head_pad_len = IPC_MEM_DL_ETH_OFFSET;
	ipc_mux->session[if_id].ul_head_pad_len =
		le32_to_cpu(open_session_resp->ul_head_pad_len);
	ipc_mux->session[if_id].wwan = ipc_mux->wwan;

	/* Reset the flow control stats of the session. */
	ipc_mux->session[if_id].flow_ctl_en_cnt = 0;
	ipc_mux->session[if_id].flow_ctl_dis_cnt = 0;
	ipc_mux->session[if_id].ul_flow_credits = 0;
	ipc_mux->session[if_id].net_tx_stop = false;
	ipc_mux->session[if_id].flow_ctl_mask = 0;

	/* Save and return the assigned interface id. */
	session_open->if_id = cpu_to_le32(if_id);
	ipc_mux->nr_sessions++;

	return true;
}

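/* Free the pending uplink packets of a session and reset its state. */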
static void ipc_mux_session_reset(struct iosm_mux *ipc_mux, int if_id)
{
	/* Reset the session/interface id state. */
	ipc_mux_session_free(ipc_mux, if_id);

	/* Empty the uplink skb accumulator. */
	skb_queue_purge(&ipc_mux->session[if_id].ul_list);
}

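/* Send the session close command to CP and reset the session state. */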
static void ipc_mux_session_close(struct iosm_mux *ipc_mux,
				  struct mux_session_close *msg)
{
	int if_id;

	/* Copy and validate the session interface id. */
	if_id = le32_to_cpu(msg->if_id);

	if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
		dev_err(ipc_mux->dev, "invalid session id %d", if_id);
		return;
	}

	/* Create and send the session close command.
	 * It is a blocking call, until CP responds or the command times out.
	 */
	if (ipc_mux_dl_acb_send_cmds(ipc_mux, MUX_CMD_CLOSE_SESSION, if_id, 0,
				     NULL, 0, true, false))
		dev_err(ipc_mux->dev, "if_id %d: CLOSE_SESSION send failed",
			if_id);

	/* Reset the flow control stats of the session. */
	ipc_mux->session[if_id].flow_ctl_en_cnt = 0;
	ipc_mux->session[if_id].flow_ctl_dis_cnt = 0;
	ipc_mux->session[if_id].flow_ctl_mask = 0;

	ipc_mux_session_reset(ipc_mux, if_id);
	ipc_mux->nr_sessions--;
}

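/* Close the MUX channel and return the MUX object to the inactive state. */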
static void ipc_mux_channel_close(struct iosm_mux *ipc_mux,
				  struct mux_channel_close *channel_close_p)
{
	int i;

	/* Free the pending uplink packets of all open sessions. */
	for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++)
		if (ipc_mux->session[i].wwan)
			ipc_mux_session_reset(ipc_mux, i);

	ipc_imem_channel_close(ipc_mux->imem, ipc_mux->channel_id);

	/* Reset the MUX object. */
	ipc_mux->state = MUX_S_INACTIVE;
	ipc_mux->event = MUX_E_INACTIVE;
}

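/* Execute a MUX order (session open/close, channel close) against the
 * current state of the MUX state machine.
 */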
static int ipc_mux_schedule(struct iosm_mux *ipc_mux, union mux_msg *msg)
{
	enum mux_event order;
	bool success;
	int ret = -EIO;

	if (!ipc_mux->initialized) {
		ret = -EAGAIN;
		goto out;
	}

	order = msg->common.event;

	switch (ipc_mux->state) {
	case MUX_S_INACTIVE:
		if (order != MUX_E_MUX_SESSION_OPEN)
			goto out; /* Wait for a session open request. */

		if (ipc_mux->event == MUX_E_INACTIVE)
			/* Establish the MUX channel and the new state. */
			ipc_mux->channel_id = ipc_mux_channel_create(ipc_mux);

		if (ipc_mux->state != MUX_S_ACTIVE) {
			ret = ipc_mux->channel_id; /* No MUX channel. */
			goto out;
		}

		/* Disable the TD update timer and open the first session. */
		ipc_imem_td_update_timer_suspend(ipc_mux->imem, true);
		ipc_mux->event = MUX_E_MUX_SESSION_OPEN;
		success = ipc_mux_session_open(ipc_mux, &msg->session_open);

		ipc_imem_td_update_timer_suspend(ipc_mux->imem, false);
		if (success)
			ret = ipc_mux->channel_id;
		goto out;

	case MUX_S_ACTIVE:
		switch (order) {
		case MUX_E_MUX_SESSION_OPEN:
			/* Disable the TD update timer and open a session. */
			ipc_imem_td_update_timer_suspend(ipc_mux->imem, true);
			ipc_mux->event = MUX_E_MUX_SESSION_OPEN;
			success = ipc_mux_session_open(ipc_mux,
						       &msg->session_open);
			ipc_imem_td_update_timer_suspend(ipc_mux->imem, false);
			if (success)
				ret = ipc_mux->channel_id;
			goto out;

		case MUX_E_MUX_SESSION_CLOSE:
			/* Release an IP session; close the channel once the
			 * last session is gone.
			 */
			ipc_mux->event = MUX_E_MUX_SESSION_CLOSE;
			ipc_mux_session_close(ipc_mux, &msg->session_close);
			if (!ipc_mux->nr_sessions) {
				ipc_mux->event = MUX_E_MUX_CHANNEL_CLOSE;
				ipc_mux_channel_close(ipc_mux,
						      &msg->channel_close);
			}
			ret = ipc_mux->channel_id;
			goto out;

		case MUX_E_MUX_CHANNEL_CLOSE:
			/* Close the MUX channel pipes. */
			ipc_mux->event = MUX_E_MUX_CHANNEL_CLOSE;
			ipc_mux_channel_close(ipc_mux, &msg->channel_close);
			ret = ipc_mux->channel_id;
			goto out;

		default:
			/* Invalid order. */
			goto out;
		}

	default:
		dev_err(ipc_mux->dev,
			"unexpected MUX transition: state=%d, event=%d",
			ipc_mux->state, ipc_mux->event);
	}
out:
	return ret;
}

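/* Allocate a MUX instance, take over the IMEM references and set up the
 * uplink ADB resources for the configured protocol.
 */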
struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
			      struct iosm_imem *imem)
{
	struct iosm_mux *ipc_mux = kzalloc(sizeof(*ipc_mux), GFP_KERNEL);
	int i, j, ul_tds, ul_td_size;
	struct sk_buff_head *free_list;
	struct sk_buff *skb;
	int qlt_size;

	if (!ipc_mux)
		return NULL;

	ipc_mux->protocol = mux_cfg->protocol;
	ipc_mux->ul_flow = mux_cfg->ul_flow;
	ipc_mux->instance_id = mux_cfg->instance_id;
	ipc_mux->wwan_q_offset = 0;

	ipc_mux->pcie = imem->pcie;
	ipc_mux->imem = imem;
	ipc_mux->ipc_protocol = imem->ipc_protocol;
	ipc_mux->dev = imem->dev;
	ipc_mux->wwan = imem->wwan;

	/* Get the reference to the UL ADB list. */
	free_list = &ipc_mux->ul_adb.free_list;

	/* Initialize the list of free ADBs. */
	skb_queue_head_init(free_list);

	ul_td_size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;

	ul_tds = IPC_MEM_MAX_TDS_MUX_LITE_UL;

	ipc_mux->ul_adb.dest_skb = NULL;

	ipc_mux->initialized = true;
	ipc_mux->adb_prep_ongoing = false;
	ipc_mux->size_needed = 0;
	ipc_mux->ul_data_pend_bytes = 0;
	ipc_mux->state = MUX_S_INACTIVE;
	ipc_mux->ev_mux_net_transmit_pending = false;
	ipc_mux->tx_transaction_id = 0;
	ipc_mux->rr_next_session = 0;
	ipc_mux->event = MUX_E_INACTIVE;
	ipc_mux->channel_id = -1;
	ipc_mux->channel = NULL;

	if (ipc_mux->protocol != MUX_LITE) {
		qlt_size = offsetof(struct mux_qlth, ql) +
			   MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);

		for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
			ipc_mux->ul_adb.pp_qlt[i] = kzalloc(qlt_size,
							    GFP_ATOMIC);
			if (!ipc_mux->ul_adb.pp_qlt[i]) {
				for (j = i - 1; j >= 0; j--)
					kfree(ipc_mux->ul_adb.pp_qlt[j]);
				kfree(ipc_mux); /* Don't leak the instance. */
				return NULL;
			}
		}

		ul_td_size = IPC_MEM_MAX_UL_ADB_BUF_SIZE;
		ul_tds = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
	}

	/* Allocate the list of UL ADBs. */
	for (i = 0; i < ul_tds; i++) {
		dma_addr_t mapping;

		skb = ipc_pcie_alloc_skb(ipc_mux->pcie, ul_td_size, GFP_ATOMIC,
					 &mapping, DMA_TO_DEVICE, 0);
		if (!skb) {
			ipc_mux_deinit(ipc_mux);
			return NULL;
		}

		/* Extend the UL ADB list. */
		skb_queue_tail(free_list, skb);
	}

	return ipc_mux;
}

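/* Informs the network stack to restart transmission for all opened sessions
 * if flow control is not ON for that session.
 */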
static void ipc_mux_restart_tx_for_all_sessions(struct iosm_mux *ipc_mux)
{
	struct mux_session *session;
	int idx;

	for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
		session = &ipc_mux->session[idx];

		if (!session->wwan)
			continue;

		/* If flow control of the session is OFF and there was a tx
		 * stop, inform the network interface to restart sending data.
		 */
		if (session->flow_ctl_mask == 0) {
			session->net_tx_stop = false;
			ipc_mux_netif_tx_flowctrl(session, idx, false);
		}
	}
}

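/* Informs the network stack to stop sending further packets for all opened
 * sessions.
 */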
static void ipc_mux_stop_netif_for_all_sessions(struct iosm_mux *ipc_mux)
{
	struct mux_session *session;
	int idx;

	for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
		session = &ipc_mux->session[idx];

		if (!session->wwan)
			continue;

		ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
	}
}

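/* Restart transmission once the number of pending uplink bytes has dropped
 * below the flow control low watermark.
 */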
void ipc_mux_check_n_restart_tx(struct iosm_mux *ipc_mux)
{
	if (ipc_mux->ul_flow == MUX_UL) {
		int low_thresh = IPC_MEM_MUX_UL_FLOWCTRL_LOW_B;

		if (ipc_mux->ul_data_pend_bytes < low_thresh)
			ipc_mux_restart_tx_for_all_sessions(ipc_mux);
	}
}

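/* Return the maximum number of supported IP sessions. */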
int ipc_mux_get_max_sessions(struct iosm_mux *ipc_mux)
{
	return ipc_mux ? IPC_MEM_MUX_IP_SESSION_ENTRIES : -EFAULT;
}

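/* Return the active MUX protocol, or MUX_UNKNOWN without a MUX instance. */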
enum ipc_mux_protocol ipc_mux_get_active_protocol(struct iosm_mux *ipc_mux)
{
	return ipc_mux ? ipc_mux->protocol : MUX_UNKNOWN;
}

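/* Post a session open order to the MUX state machine. */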
int ipc_mux_open_session(struct iosm_mux *ipc_mux, int session_nr)
{
	struct mux_session_open *session_open;
	union mux_msg mux_msg;

	session_open = &mux_msg.session_open;
	session_open->event = MUX_E_MUX_SESSION_OPEN;

	session_open->if_id = cpu_to_le32(session_nr);
	ipc_mux->session[session_nr].flags |= IPC_MEM_WWAN_MUX;
	return ipc_mux_schedule(ipc_mux, &mux_msg);
}

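/* Post a session close order to the MUX state machine. */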
int ipc_mux_close_session(struct iosm_mux *ipc_mux, int session_nr)
{
	struct mux_session_close *session_close;
	union mux_msg mux_msg;
	int ret_val;

	session_close = &mux_msg.session_close;
	session_close->event = MUX_E_MUX_SESSION_CLOSE;

	session_close->if_id = cpu_to_le32(session_nr);
	ret_val = ipc_mux_schedule(ipc_mux, &mux_msg);
	ipc_mux->session[session_nr].flags &= ~IPC_MEM_WWAN_MUX;

	return ret_val;
}

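/* Stop all net interfaces, close the channel if still active, free the
 * uplink ADB skbs and release the MUX instance.
 */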
void ipc_mux_deinit(struct iosm_mux *ipc_mux)
{
	struct mux_channel_close *channel_close;
	struct sk_buff_head *free_list;
	union mux_msg mux_msg;
	struct sk_buff *skb;

	if (!ipc_mux->initialized)
		return;
	ipc_mux_stop_netif_for_all_sessions(ipc_mux);

	if (ipc_mux->state == MUX_S_ACTIVE) {
		channel_close = &mux_msg.channel_close;
		channel_close->event = MUX_E_MUX_CHANNEL_CLOSE;
		ipc_mux_schedule(ipc_mux, &mux_msg);
	}

	/* Empty the ADB free list. */
	free_list = &ipc_mux->ul_adb.free_list;

	/* Free the skbs queued as free UL ADBs. */
	while ((skb = skb_dequeue(free_list)))
		ipc_pcie_kfree_skb(ipc_mux->pcie, skb);

	if (ipc_mux->channel) {
		ipc_mux->channel->ul_pipe.is_open = false;
		ipc_mux->channel->dl_pipe.is_open = false;
	}

	kfree(ipc_mux);
}