// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/wwan.h>

#include "t7xx_hif_cldma.h"
#include "t7xx_modem_ops.h"
#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"

#define Q_IDX_CTRL          0
#define Q_IDX_MBIM          2
#define Q_IDX_AT_CMD        5

#define INVALID_SEQ_NUM     GENMASK(15, 0)

#define for_each_proxy_port(i, p, proxy)    \
    for (i = 0, (p) = &(proxy)->ports[i];   \
         i < (proxy)->port_count;       \
         i++, (p) = &(proxy)->ports[i])

static const struct t7xx_port_conf t7xx_md_port_conf[] = {
    {
        .tx_ch = PORT_CH_UART2_TX,
        .rx_ch = PORT_CH_UART2_RX,
        .txq_index = Q_IDX_AT_CMD,
        .rxq_index = Q_IDX_AT_CMD,
        .txq_exp_index = 0xff,
        .rxq_exp_index = 0xff,
        .path_id = CLDMA_ID_MD,
        .ops = &wwan_sub_port_ops,
        .name = "AT",
        .port_type = WWAN_PORT_AT,
    }, {
        .tx_ch = PORT_CH_MBIM_TX,
        .rx_ch = PORT_CH_MBIM_RX,
        .txq_index = Q_IDX_MBIM,
        .rxq_index = Q_IDX_MBIM,
        .path_id = CLDMA_ID_MD,
        .ops = &wwan_sub_port_ops,
        .name = "MBIM",
        .port_type = WWAN_PORT_MBIM,
    }, {
        .tx_ch = PORT_CH_CONTROL_TX,
        .rx_ch = PORT_CH_CONTROL_RX,
        .txq_index = Q_IDX_CTRL,
        .rxq_index = Q_IDX_CTRL,
        .path_id = CLDMA_ID_MD,
        .ops = &ctl_port_ops,
        .name = "t7xx_ctrl",
    },
};

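/* Look up a port by its TX or RX channel, or NULL if none matches. */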
static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch)
{
    const struct t7xx_port_conf *port_conf;
    struct t7xx_port *port;
    int i;

    for_each_proxy_port(i, port, port_prox) {
        port_conf = port->port_conf;
        if (port_conf->rx_ch == ch || port_conf->tx_ch == ch)
            return port;
    }

    return NULL;
}

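/*
 * Compute the next expected RX sequence number from the CCCI header and
 * warn (rate limited) on out-of-order packets. Checking is skipped if the
 * header does not assert it or no valid RX sequence number is known yet.
 */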
static u16 t7xx_port_next_rx_seq_num(struct t7xx_port *port, struct ccci_header *ccci_h)
{
    u32 status = le32_to_cpu(ccci_h->status);
    u16 seq_num, next_seq_num;
    bool assert_bit;

    seq_num = FIELD_GET(CCCI_H_SEQ_FLD, status);
    next_seq_num = (seq_num + 1) & FIELD_MAX(CCCI_H_SEQ_FLD);
    assert_bit = status & CCCI_H_AST_BIT;
    if (!assert_bit || port->seq_nums[MTK_RX] == INVALID_SEQ_NUM)
        return next_seq_num;

    if (seq_num != port->seq_nums[MTK_RX])
        dev_warn_ratelimited(port->dev,
                     "seq num out-of-order %u != %u (header %X, len %X)\n",
                     seq_num, port->seq_nums[MTK_RX],
                     le32_to_cpu(ccci_h->packet_header),
                     le32_to_cpu(ccci_h->packet_len));

    return next_seq_num;
}

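/*
 * Reset the TX/RX sequence counters of every port. The RX counter stays
 * invalid until the first packet is received.
 */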
void t7xx_port_proxy_reset(struct port_proxy *port_prox)
{
    struct t7xx_port *port;
    int i;

    for_each_proxy_port(i, port, port_prox) {
        port->seq_nums[MTK_RX] = INVALID_SEQ_NUM;
        port->seq_nums[MTK_TX] = 0;
    }
}

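/*
 * Pick the TX queue: the exception queue while the modem is in exception
 * state, the normal queue otherwise.
 */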
static int t7xx_port_get_queue_no(struct t7xx_port *port)
{
    const struct t7xx_port_conf *port_conf = port->port_conf;
    struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;

    return t7xx_fsm_get_md_state(ctl) == MD_STATE_EXCEPTION ?
        port_conf->txq_exp_index : port_conf->txq_index;
}

static void t7xx_port_struct_init(struct t7xx_port *port)
{
    INIT_LIST_HEAD(&port->entry);
    INIT_LIST_HEAD(&port->queue_entry);
    skb_queue_head_init(&port->rx_skb_list);
    init_waitqueue_head(&port->rx_wq);
    port->seq_nums[MTK_RX] = INVALID_SEQ_NUM;
    port->seq_nums[MTK_TX] = 0;
    atomic_set(&port->usage_cnt, 0);
}

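/* Allocate an skb with headroom reserved for the CCCI header. */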
struct sk_buff *t7xx_port_alloc_skb(int payload)
{
    struct sk_buff *skb = __dev_alloc_skb(payload + sizeof(struct ccci_header), GFP_KERNEL);

    if (skb)
        skb_reserve(skb, sizeof(struct ccci_header));

    return skb;
}

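/* Allocate an skb with headroom for the CCCI and control message headers. */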
struct sk_buff *t7xx_ctrl_alloc_skb(int payload)
{
    struct sk_buff *skb = t7xx_port_alloc_skb(payload + sizeof(struct ctrl_msg_header));

    if (skb)
        skb_reserve(skb, sizeof(struct ctrl_msg_header));

    return skb;
}

/**
 * t7xx_port_enqueue_skb() - Enqueue the received skb into the port's rx_skb_list.
 * @port: port context.
 * @skb: received skb.
 *
 * Return:
 * * 0          - Success.
 * * -ENOBUFS   - Not enough buffer space. The caller will retry later; the skb is not consumed.
 */
int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb)
{
    unsigned long flags;

    spin_lock_irqsave(&port->rx_wq.lock, flags);
    if (port->rx_skb_list.qlen >= port->rx_length_th) {
        spin_unlock_irqrestore(&port->rx_wq.lock, flags);

        return -ENOBUFS;
    }
    __skb_queue_tail(&port->rx_skb_list, skb);
    spin_unlock_irqrestore(&port->rx_wq.lock, flags);

    wake_up_all(&port->rx_wq);
    return 0;
}

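/* Hand the skb to CLDMA on the TX queue for the current modem state. */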
static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
{
    enum cldma_id path_id = port->port_conf->path_id;
    struct cldma_ctrl *md_ctrl;
    int ret, tx_qno;

    md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
    tx_qno = t7xx_port_get_queue_no(port);
    ret = t7xx_cldma_send_skb(md_ctrl, tx_qno, skb);
    if (ret)
        dev_err(port->dev, "Failed to send skb: %d\n", ret);

    return ret;
}

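/*
 * Prepend the CCCI header (channel, TX sequence number, assert bit) and
 * send the skb. The TX sequence number only advances on success.
 */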
static int t7xx_port_send_ccci_skb(struct t7xx_port *port, struct sk_buff *skb,
                   unsigned int pkt_header, unsigned int ex_msg)
{
    const struct t7xx_port_conf *port_conf = port->port_conf;
    struct ccci_header *ccci_h;
    u32 status;
    int ret;

    ccci_h = skb_push(skb, sizeof(*ccci_h));
    status = FIELD_PREP(CCCI_H_CHN_FLD, port_conf->tx_ch) |
         FIELD_PREP(CCCI_H_SEQ_FLD, port->seq_nums[MTK_TX]) | CCCI_H_AST_BIT;
    ccci_h->status = cpu_to_le32(status);
    ccci_h->packet_header = cpu_to_le32(pkt_header);
    ccci_h->packet_len = cpu_to_le32(skb->len);
    ccci_h->ex_msg = cpu_to_le32(ex_msg);

    ret = t7xx_port_send_raw_skb(port, skb);
    if (ret)
        return ret;

    port->seq_nums[MTK_TX]++;
    return 0;
}

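/*
 * Prepend the control message header, then send through the CCCI path.
 * An empty payload is flagged with CCCI_HEADER_NO_DATA.
 */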
int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
               unsigned int ex_msg)
{
    struct ctrl_msg_header *ctrl_msg_h;
    unsigned int msg_len = skb->len;
    u32 pkt_header = 0;

    ctrl_msg_h = skb_push(skb, sizeof(*ctrl_msg_h));
    ctrl_msg_h->ctrl_msg_id = cpu_to_le32(msg);
    ctrl_msg_h->ex_msg = cpu_to_le32(ex_msg);
    ctrl_msg_h->data_length = cpu_to_le32(msg_len);

    if (!msg_len)
        pkt_header = CCCI_HEADER_NO_DATA;

    return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg);
}

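/*
 * Send an skb on a port. Unless the FSM is still in the PRE_START phase,
 * TX is refused while the modem cannot accept it: in exception state only
 * the MD log channel may transmit, and handshake, stopping, stopped and
 * invalid states reject all traffic.
 */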
int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
               unsigned int ex_msg)
{
    struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
    unsigned int fsm_state;

    fsm_state = t7xx_fsm_get_ctl_state(ctl);
    if (fsm_state != FSM_STATE_PRE_START) {
        const struct t7xx_port_conf *port_conf = port->port_conf;
        enum md_state md_state = t7xx_fsm_get_md_state(ctl);

        switch (md_state) {
        case MD_STATE_EXCEPTION:
            if (port_conf->tx_ch != PORT_CH_MD_LOG_TX)
                return -EBUSY;
            break;

        case MD_STATE_WAITING_FOR_HS1:
        case MD_STATE_WAITING_FOR_HS2:
        case MD_STATE_STOPPED:
        case MD_STATE_WAITING_TO_STOP:
        case MD_STATE_INVALID:
            return -ENODEV;

        default:
            break;
        }
    }

    return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg);
}

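/*
 * Build the RX channel and per-queue lookup lists used to dispatch
 * received packets to their ports.
 */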
static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox)
{
    struct t7xx_port *port;
    int i, j;

    for (i = 0; i < ARRAY_SIZE(port_prox->rx_ch_ports); i++)
        INIT_LIST_HEAD(&port_prox->rx_ch_ports[i]);

    for (j = 0; j < ARRAY_SIZE(port_prox->queue_ports); j++) {
        for (i = 0; i < ARRAY_SIZE(port_prox->queue_ports[j]); i++)
            INIT_LIST_HEAD(&port_prox->queue_ports[j][i]);
    }

    for_each_proxy_port(i, port, port_prox) {
        const struct t7xx_port_conf *port_conf = port->port_conf;
        enum cldma_id path_id = port_conf->path_id;
        u8 ch_id;

        ch_id = FIELD_GET(PORT_CH_ID_MASK, port_conf->rx_ch);
        list_add_tail(&port->entry, &port_prox->rx_ch_ports[ch_id]);
        list_add_tail(&port->queue_entry,
                  &port_prox->queue_ports[path_id][port_conf->rxq_index]);
    }
}

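/* Find the port bound to the given CLDMA path and RX channel, or NULL. */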
static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev,
                           struct cldma_queue *queue, u16 channel)
{
    struct port_proxy *port_prox = t7xx_dev->md->port_prox;
    struct list_head *port_list;
    struct t7xx_port *port;
    u8 ch_id;

    ch_id = FIELD_GET(PORT_CH_ID_MASK, channel);
    port_list = &port_prox->rx_ch_ports[ch_id];
    list_for_each_entry(port, port_list, entry) {
        const struct t7xx_port_conf *port_conf = port->port_conf;

        if (queue->md_ctrl->hif_id == port_conf->path_id &&
            channel == port_conf->rx_ch)
            return port;
    }

    return NULL;
}

/**
 * t7xx_port_proxy_recv_skb() - Dispatch received skb.
 * @queue: CLDMA queue.
 * @skb: Socket buffer.
 *
 * Return:
 * * 0      - Packet consumed.
 * * -ERROR - Failed to process skb.
 */
static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
    struct ccci_header *ccci_h = (struct ccci_header *)skb->data;
    struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
    struct t7xx_fsm_ctl *ctl = t7xx_dev->md->fsm_ctl;
    struct device *dev = queue->md_ctrl->dev;
    const struct t7xx_port_conf *port_conf;
    struct t7xx_port *port;
    u16 seq_num, channel;
    int ret;

    channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status));
    if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) {
        dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel);
        goto drop_skb;
    }

    port = t7xx_port_proxy_find_port(t7xx_dev, queue, channel);
    if (!port) {
        dev_err_ratelimited(dev, "Packet drop on channel 0x%x, port not found\n", channel);
        goto drop_skb;
    }

    seq_num = t7xx_port_next_rx_seq_num(port, ccci_h);
    port_conf = port->port_conf;
    skb_pull(skb, sizeof(*ccci_h));

    ret = port_conf->ops->recv_skb(port, skb);
    /* An error tells the caller to retry this skb later; restore the CCCI header */
    if (ret) {
        skb_push(skb, sizeof(*ccci_h));
        return ret;
    }

    port->seq_nums[MTK_RX] = seq_num;
    return 0;

drop_skb:
    dev_kfree_skb_any(skb);
    return 0;
}

/**
 * t7xx_port_proxy_md_status_notify() - Notify all ports of state.
 * @port_prox: The port_proxy pointer.
 * @state: State.
 *
 * Called by t7xx_fsm. Dispatches the modem state to all ports that need
 * to know about MD state transitions.
 */
void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state)
{
    struct t7xx_port *port;
    int i;

    for_each_proxy_port(i, port, port_prox) {
        const struct t7xx_port_conf *port_conf = port->port_conf;

        if (port_conf->ops->md_state_notify)
            port_conf->ops->md_state_notify(port, state);
    }
}

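/*
 * Initialize every port instance, record the control port and build the
 * channel mappings.
 */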
static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
{
    struct port_proxy *port_prox = md->port_prox;
    struct t7xx_port *port;
    int i;

    for_each_proxy_port(i, port, port_prox) {
        const struct t7xx_port_conf *port_conf = port->port_conf;

        t7xx_port_struct_init(port);

        if (port_conf->tx_ch == PORT_CH_CONTROL_TX)
            md->core_md.ctl_port = port;

        port->t7xx_dev = md->t7xx_dev;
        port->dev = &md->t7xx_dev->pdev->dev;
        spin_lock_init(&port->port_update_lock);
        port->chan_enable = false;

        if (port_conf->ops->init)
            port_conf->ops->init(port);
    }

    t7xx_proxy_setup_ch_mapping(port_prox);
}

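/*
 * Allocate the proxy together with its port array and bind each port to
 * its static configuration.
 */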
static int t7xx_proxy_alloc(struct t7xx_modem *md)
{
    unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf);
    struct device *dev = &md->t7xx_dev->pdev->dev;
    struct port_proxy *port_prox;
    int i;

    port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count,
                 GFP_KERNEL);
    if (!port_prox)
        return -ENOMEM;

    md->port_prox = port_prox;
    port_prox->dev = dev;

    for (i = 0; i < port_count; i++)
        port_prox->ports[i].port_conf = &t7xx_md_port_conf[i];

    port_prox->port_count = port_count;
    t7xx_proxy_init_all_ports(md);
    return 0;
}

/**
 * t7xx_port_proxy_init() - Initialize ports.
 * @md: Modem.
 *
 * Create all port instances.
 *
 * Return:
 * * 0      - Success.
 * * -ERROR - Error code from a failed sub-initialization.
 */
int t7xx_port_proxy_init(struct t7xx_modem *md)
{
    int ret;

    ret = t7xx_proxy_alloc(md);
    if (ret)
        return ret;

    t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
    return 0;
}

void t7xx_port_proxy_uninit(struct port_proxy *port_prox)
{
    struct t7xx_port *port;
    int i;

    for_each_proxy_port(i, port, port_prox) {
        const struct t7xx_port_conf *port_conf = port->port_conf;

        if (port_conf->ops->uninit)
            port_conf->ops->uninit(port);
    }
}

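/* Enable or disable the channel ch_id via the owning port's callbacks. */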
int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
                       bool en_flag)
{
    struct t7xx_port *port = t7xx_proxy_get_port_by_ch(port_prox, ch_id);
    const struct t7xx_port_conf *port_conf;

    if (!port)
        return -EINVAL;

    port_conf = port->port_conf;

    if (en_flag) {
        if (port_conf->ops->enable_chl)
            port_conf->ops->enable_chl(port);
    } else {
        if (port_conf->ops->disable_chl)
            port_conf->ops->disable_chl(port);
    }

    return 0;
}