Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (c) 2021, MediaTek Inc.
0004  * Copyright (c) 2021-2022, Intel Corporation.
0005  *
0006  * Authors:
0007  *  Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
0008  *  Haijun Liu <haijun.liu@mediatek.com>
0009  *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
0010  *
0011  * Contributors:
0012  *  Amir Hanania <amir.hanania@intel.com>
0013  *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
0014  *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
0015  *  Eliot Lee <eliot.lee@intel.com>
0016  *  Moises Veleta <moises.veleta@intel.com>
0017  *  Sreehari Kancharla <sreehari.kancharla@intel.com>
0018  */
0019 
0020 #include <linux/atomic.h>
0021 #include <linux/device.h>
0022 #include <linux/gfp.h>
0023 #include <linux/if_arp.h>
0024 #include <linux/if_ether.h>
0025 #include <linux/kernel.h>
0026 #include <linux/list.h>
0027 #include <linux/netdev_features.h>
0028 #include <linux/netdevice.h>
0029 #include <linux/skbuff.h>
0030 #include <linux/types.h>
0031 #include <linux/wwan.h>
0032 #include <net/pkt_sched.h>
0033 
0034 #include "t7xx_hif_dpmaif_rx.h"
0035 #include "t7xx_hif_dpmaif_tx.h"
0036 #include "t7xx_netdev.h"
0037 #include "t7xx_pci.h"
0038 #include "t7xx_port_proxy.h"
0039 #include "t7xx_state_monitor.h"
0040 
0041 #define IP_MUX_SESSION_DEFAULT  0
0042 
0043 static int t7xx_ccmni_open(struct net_device *dev)
0044 {
0045     struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
0046 
0047     netif_carrier_on(dev);
0048     netif_tx_start_all_queues(dev);
0049     atomic_inc(&ccmni->usage);
0050     return 0;
0051 }
0052 
0053 static int t7xx_ccmni_close(struct net_device *dev)
0054 {
0055     struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
0056 
0057     atomic_dec(&ccmni->usage);
0058     netif_carrier_off(dev);
0059     netif_tx_disable(dev);
0060     return 0;
0061 }
0062 
0063 static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb,
0064                   unsigned int txq_number)
0065 {
0066     struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb;
0067     struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb);
0068 
0069     skb_cb->netif_idx = ccmni->index;
0070 
0071     if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb))
0072         return NETDEV_TX_BUSY;
0073 
0074     return 0;
0075 }
0076 
0077 static int t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
0078 {
0079     struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
0080     int skb_len = skb->len;
0081 
0082     /* If MTU is changed or there is no headroom, drop the packet */
0083     if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) {
0084         dev_kfree_skb(skb);
0085         dev->stats.tx_dropped++;
0086         return NETDEV_TX_OK;
0087     }
0088 
0089     if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE))
0090         return NETDEV_TX_BUSY;
0091 
0092     dev->stats.tx_packets++;
0093     dev->stats.tx_bytes += skb_len;
0094 
0095     return NETDEV_TX_OK;
0096 }
0097 
0098 static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue)
0099 {
0100     struct t7xx_ccmni *ccmni = netdev_priv(dev);
0101 
0102     dev->stats.tx_errors++;
0103 
0104     if (atomic_read(&ccmni->usage) > 0)
0105         netif_tx_wake_all_queues(dev);
0106 }
0107 
/* Net device callbacks for a CCMNI WWAN network interface. */
static const struct net_device_ops ccmni_netdev_ops = {
	.ndo_open	  = t7xx_ccmni_open,
	.ndo_stop	  = t7xx_ccmni_close,
	.ndo_start_xmit   = t7xx_ccmni_start_xmit,
	.ndo_tx_timeout   = t7xx_ccmni_tx_timeout,
};
0114 
0115 static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb)
0116 {
0117     struct t7xx_ccmni *ccmni;
0118     int i;
0119 
0120     for (i = 0; i < ctlb->nic_dev_num; i++) {
0121         ccmni = ctlb->ccmni_inst[i];
0122         if (!ccmni)
0123             continue;
0124 
0125         if (atomic_read(&ccmni->usage) > 0) {
0126             netif_tx_start_all_queues(ccmni->dev);
0127             netif_carrier_on(ccmni->dev);
0128         }
0129     }
0130 }
0131 
0132 static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb)
0133 {
0134     struct t7xx_ccmni *ccmni;
0135     int i;
0136 
0137     for (i = 0; i < ctlb->nic_dev_num; i++) {
0138         ccmni = ctlb->ccmni_inst[i];
0139         if (!ccmni)
0140             continue;
0141 
0142         if (atomic_read(&ccmni->usage) > 0)
0143             netif_tx_disable(ccmni->dev);
0144     }
0145 }
0146 
0147 static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb)
0148 {
0149     struct t7xx_ccmni *ccmni;
0150     int i;
0151 
0152     for (i = 0; i < ctlb->nic_dev_num; i++) {
0153         ccmni = ctlb->ccmni_inst[i];
0154         if (!ccmni)
0155             continue;
0156 
0157         if (atomic_read(&ccmni->usage) > 0)
0158             netif_carrier_off(ccmni->dev);
0159     }
0160 }
0161 
0162 static void t7xx_ccmni_wwan_setup(struct net_device *dev)
0163 {
0164     dev->hard_header_len += sizeof(struct ccci_header);
0165 
0166     dev->mtu = ETH_DATA_LEN;
0167     dev->max_mtu = CCMNI_MTU_MAX;
0168     BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE);
0169 
0170     dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
0171     dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO;
0172 
0173     dev->flags = IFF_POINTOPOINT | IFF_NOARP;
0174 
0175     dev->features = NETIF_F_VLAN_CHALLENGED;
0176 
0177     dev->features |= NETIF_F_SG;
0178     dev->hw_features |= NETIF_F_SG;
0179 
0180     dev->features |= NETIF_F_HW_CSUM;
0181     dev->hw_features |= NETIF_F_HW_CSUM;
0182 
0183     dev->features |= NETIF_F_RXCSUM;
0184     dev->hw_features |= NETIF_F_RXCSUM;
0185 
0186     dev->needs_free_netdev = true;
0187 
0188     dev->type = ARPHRD_NONE;
0189 
0190     dev->netdev_ops = &ccmni_netdev_ops;
0191 }
0192 
0193 static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
0194                    struct netlink_ext_ack *extack)
0195 {
0196     struct t7xx_ccmni_ctrl *ctlb = ctxt;
0197     struct t7xx_ccmni *ccmni;
0198     int ret;
0199 
0200     if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
0201         return -EINVAL;
0202 
0203     ccmni = wwan_netdev_drvpriv(dev);
0204     ccmni->index = if_id;
0205     ccmni->ctlb = ctlb;
0206     ccmni->dev = dev;
0207     atomic_set(&ccmni->usage, 0);
0208     ctlb->ccmni_inst[if_id] = ccmni;
0209 
0210     ret = register_netdevice(dev);
0211     if (ret)
0212         return ret;
0213 
0214     netif_device_attach(dev);
0215     return 0;
0216 }
0217 
0218 static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head)
0219 {
0220     struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
0221     struct t7xx_ccmni_ctrl *ctlb = ctxt;
0222     u8 if_id = ccmni->index;
0223 
0224     if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
0225         return;
0226 
0227     if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
0228         return;
0229 
0230     unregister_netdevice(dev);
0231 }
0232 
/* Hooks the WWAN core uses to create/destroy CCMNI network links. */
static const struct wwan_ops ccmni_wwan_ops = {
	.priv_size = sizeof(struct t7xx_ccmni),
	.setup     = t7xx_ccmni_wwan_setup,
	.newlink   = t7xx_ccmni_wwan_newlink,
	.dellink   = t7xx_ccmni_wwan_dellink,
};
0239 
0240 static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb)
0241 {
0242     struct device *dev = ctlb->hif_ctrl->dev;
0243     int ret;
0244 
0245     if (ctlb->wwan_is_registered)
0246         return 0;
0247 
0248     /* WWAN core will create a netdev for the default IP MUX channel */
0249     ret = wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT);
0250     if (ret < 0) {
0251         dev_err(dev, "Unable to register WWAN ops, %d\n", ret);
0252         return ret;
0253     }
0254 
0255     ctlb->wwan_is_registered = true;
0256     return 0;
0257 }
0258 
0259 static int t7xx_ccmni_md_state_callback(enum md_state state, void *para)
0260 {
0261     struct t7xx_ccmni_ctrl *ctlb = para;
0262     struct device *dev;
0263     int ret = 0;
0264 
0265     dev = ctlb->hif_ctrl->dev;
0266     ctlb->md_sta = state;
0267 
0268     switch (state) {
0269     case MD_STATE_READY:
0270         ret = t7xx_ccmni_register_wwan(ctlb);
0271         if (!ret)
0272             t7xx_ccmni_start(ctlb);
0273         break;
0274 
0275     case MD_STATE_EXCEPTION:
0276     case MD_STATE_STOPPED:
0277         t7xx_ccmni_pre_stop(ctlb);
0278 
0279         ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
0280         if (ret < 0)
0281             dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);
0282 
0283         t7xx_ccmni_post_stop(ctlb);
0284         break;
0285 
0286     case MD_STATE_WAITING_FOR_HS1:
0287     case MD_STATE_WAITING_TO_STOP:
0288         ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
0289         if (ret < 0)
0290             dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);
0291 
0292         break;
0293 
0294     default:
0295         break;
0296     }
0297 
0298     return ret;
0299 }
0300 
0301 static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev)
0302 {
0303     struct t7xx_ccmni_ctrl  *ctlb = t7xx_dev->ccmni_ctlb;
0304     struct t7xx_fsm_notifier *md_status_notifier;
0305 
0306     md_status_notifier = &ctlb->md_status_notify;
0307     INIT_LIST_HEAD(&md_status_notifier->entry);
0308     md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback;
0309     md_status_notifier->data = ctlb;
0310 
0311     t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier);
0312 }
0313 
0314 static void t7xx_ccmni_recv_skb(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb)
0315 {
0316     struct t7xx_skb_cb *skb_cb;
0317     struct net_device *net_dev;
0318     struct t7xx_ccmni *ccmni;
0319     int pkt_type, skb_len;
0320     u8 netif_id;
0321 
0322     skb_cb = T7XX_SKB_CB(skb);
0323     netif_id = skb_cb->netif_idx;
0324     ccmni = t7xx_dev->ccmni_ctlb->ccmni_inst[netif_id];
0325     if (!ccmni) {
0326         dev_kfree_skb(skb);
0327         return;
0328     }
0329 
0330     net_dev = ccmni->dev;
0331     skb->dev = net_dev;
0332 
0333     pkt_type = skb_cb->rx_pkt_type;
0334     if (pkt_type == PKT_TYPE_IP6)
0335         skb->protocol = htons(ETH_P_IPV6);
0336     else
0337         skb->protocol = htons(ETH_P_IP);
0338 
0339     skb_len = skb->len;
0340     netif_rx(skb);
0341     net_dev->stats.rx_packets++;
0342     net_dev->stats.rx_bytes += skb_len;
0343 }
0344 
0345 static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
0346 {
0347     struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
0348     struct netdev_queue *net_queue;
0349 
0350     if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
0351         net_queue = netdev_get_tx_queue(ccmni->dev, qno);
0352         if (netif_tx_queue_stopped(net_queue))
0353             netif_tx_wake_queue(net_queue);
0354     }
0355 }
0356 
0357 static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
0358 {
0359     struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
0360     struct netdev_queue *net_queue;
0361 
0362     if (atomic_read(&ccmni->usage) > 0) {
0363         netdev_err(ccmni->dev, "TX queue %d is full\n", qno);
0364         net_queue = netdev_get_tx_queue(ccmni->dev, qno);
0365         netif_tx_stop_queue(net_queue);
0366     }
0367 }
0368 
0369 static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
0370                       enum dpmaif_txq_state state, int qno)
0371 {
0372     struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
0373 
0374     if (ctlb->md_sta != MD_STATE_READY)
0375         return;
0376 
0377     if (!ctlb->ccmni_inst[0]) {
0378         dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
0379         return;
0380     }
0381 
0382     if (state == DMPAIF_TXQ_STATE_IRQ)
0383         t7xx_ccmni_queue_tx_irq_notify(ctlb, qno);
0384     else if (state == DMPAIF_TXQ_STATE_FULL)
0385         t7xx_ccmni_queue_tx_full_notify(ctlb, qno);
0386 }
0387 
0388 int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
0389 {
0390     struct device *dev = &t7xx_dev->pdev->dev;
0391     struct t7xx_ccmni_ctrl *ctlb;
0392 
0393     ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL);
0394     if (!ctlb)
0395         return -ENOMEM;
0396 
0397     t7xx_dev->ccmni_ctlb = ctlb;
0398     ctlb->t7xx_dev = t7xx_dev;
0399     ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify;
0400     ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb;
0401     ctlb->nic_dev_num = NIC_DEV_DEFAULT;
0402 
0403     ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks);
0404     if (!ctlb->hif_ctrl)
0405         return -ENOMEM;
0406 
0407     init_md_status_notifier(t7xx_dev);
0408     return 0;
0409 }
0410 
0411 void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev)
0412 {
0413     struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
0414 
0415     t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify);
0416 
0417     if (ctlb->wwan_is_registered) {
0418         wwan_unregister_ops(&t7xx_dev->pdev->dev);
0419         ctlb->wwan_is_registered = false;
0420     }
0421 
0422     t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
0423 }