// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_link.h>
#include <linux/rtnetlink.h>
#include <linux/wwan.h>
#include <net/pkt_sched.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_wwan.h"

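/* The upper nibble of the first payload byte carries the IP version; the RX
 * path uses these masks to tag an skb as IPv4 or IPv6.
 */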
#define IOSM_IP_TYPE_MASK 0xF0
#define IOSM_IP_TYPE_IPV4 0x40
#define IOSM_IP_TYPE_IPV6 0x60

#define IOSM_IF_ID_PAYLOAD 2

/**
 * struct iosm_netdev_priv - netdev WWAN driver specific private data
 * @ipc_wwan:	Pointer to iosm_wwan struct
 * @netdev:	Pointer to network interface device structure
 * @if_id:	Interface id for device
 * @ch_id:	IPC channel number for which interface device is created
 */
struct iosm_netdev_priv {
	struct iosm_wwan *ipc_wwan;
	struct net_device *netdev;
	int if_id;
	int ch_id;
};

/**
 * struct iosm_wwan - Information about the WWAN root device and the
 *		      interface to the IPC layer
 * @ipc_imem:		Pointer to imem data-struct
 * @sub_netlist:	List of active netdevs, published under RCU
 * @dev:		Pointer to device structure
 * @if_mutex:		Mutex used for add and remove interface id
 */
struct iosm_wwan {
	struct iosm_imem *ipc_imem;
	struct iosm_netdev_priv __rcu *sub_netlist[IP_MUX_SESSION_END + 1];
	struct device *dev;
	struct mutex if_mutex;
};

/* Bring-up the wwan net link */
static int ipc_wwan_link_open(struct net_device *netdev)
{
	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
	struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
	int if_id = priv->if_id;
	int ret;

	if (if_id < IP_MUX_SESSION_START ||
	    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
		return -EINVAL;

	mutex_lock(&ipc_wwan->if_mutex);

	/* get channel id */
	priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);

	if (priv->ch_id < 0) {
		dev_err(ipc_wwan->dev,
			"cannot connect wwan0 & id %d to the IPC mem layer",
			if_id);
		ret = -ENODEV;
		goto out;
	}

	/* enable tx path, DL data may follow */
	netif_start_queue(netdev);

	dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
		priv->ch_id, priv->if_id);

	ret = 0;
out:
	mutex_unlock(&ipc_wwan->if_mutex);
	return ret;
}

/* Bring-down the wwan net link */
static int ipc_wwan_link_stop(struct net_device *netdev)
{
	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);

	netif_stop_queue(netdev);

	mutex_lock(&priv->ipc_wwan->if_mutex);
	ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
				priv->ch_id);
	/* Mark the channel as closed until the next ndo_open */
	priv->ch_id = -1;
	mutex_unlock(&priv->ipc_wwan->if_mutex);

	return 0;
}

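/* Transmit a packet: hand the uplink skb to the IPC mem layer, which may
 * push back with -EBUSY so the device can apply flow control.
 */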
static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
					  struct net_device *netdev)
{
	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
	struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
	unsigned int len = skb->len;
	int if_id = priv->if_id;
	int ret;

	/* An out-of-range session id cannot be mapped to a channel. Drop the
	 * skb instead of returning an error: ndo_start_xmit must consume it.
	 */
	if (if_id < IP_MUX_SESSION_START ||
	    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)) {
		ret = -EINVAL;
		goto exit;
	}

	/* Send the SKB to device for transmission */
	ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem,
					 if_id, priv->ch_id, skb);

	/* Return code of zero is success */
	if (ret == 0) {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += len;
		ret = NETDEV_TX_OK;
	} else if (ret == -EBUSY) {
		ret = NETDEV_TX_BUSY;
		dev_err(ipc_wwan->dev, "unable to push packets");
	} else {
		goto exit;
	}

	return ret;

exit:
	/* Log any skb drop */
	if (if_id)
		dev_dbg(ipc_wwan->dev, "skb dropped. IF_ID: %d, ret: %d", if_id,
			ret);

	dev_kfree_skb_any(skb);
	netdev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/* Ops structure for wwan net link */
static const struct net_device_ops ipc_inm_ops = {
	.ndo_open = ipc_wwan_link_open,
	.ndo_stop = ipc_wwan_link_stop,
	.ndo_start_xmit = ipc_wwan_link_transmit,
};

/* Setup function for creating new link */
static void ipc_wwan_setup(struct net_device *iosm_dev)
{
	iosm_dev->header_ops = NULL;
	iosm_dev->hard_header_len = 0;
	iosm_dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;

	/* Pure point-to-point IP link: no L2 header, no ARP */
	iosm_dev->type = ARPHRD_NONE;
	iosm_dev->mtu = ETH_DATA_LEN;
	iosm_dev->min_mtu = ETH_MIN_MTU;
	iosm_dev->max_mtu = ETH_MAX_MTU;

	iosm_dev->flags = IFF_POINTOPOINT | IFF_NOARP;

	iosm_dev->netdev_ops = &ipc_inm_ops;
}

/* Create new wwan net link */
static int ipc_wwan_newlink(void *ctxt, struct net_device *dev,
			    u32 if_id, struct netlink_ext_ack *extack)
{
	struct iosm_wwan *ipc_wwan = ctxt;
	struct iosm_netdev_priv *priv;
	int err;

	if (if_id < IP_MUX_SESSION_START ||
	    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
		return -EINVAL;

	priv = wwan_netdev_drvpriv(dev);
	priv->if_id = if_id;
	priv->netdev = dev;
	priv->ipc_wwan = ipc_wwan;

	mutex_lock(&ipc_wwan->if_mutex);
	if (rcu_access_pointer(ipc_wwan->sub_netlist[if_id])) {
		err = -EBUSY;
		goto out_unlock;
	}

	err = register_netdevice(dev);
	if (err)
		goto out_unlock;

	rcu_assign_pointer(ipc_wwan->sub_netlist[if_id], priv);
	mutex_unlock(&ipc_wwan->if_mutex);

	netif_device_attach(dev);

	return 0;

out_unlock:
	mutex_unlock(&ipc_wwan->if_mutex);
	return err;
}

/* Remove a wwan net link */
static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
			     struct list_head *head)
{
	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(dev);
	struct iosm_wwan *ipc_wwan = ctxt;
	int if_id = priv->if_id;

	if (WARN_ON(if_id < IP_MUX_SESSION_START ||
		    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)))
		return;

	mutex_lock(&ipc_wwan->if_mutex);

	if (WARN_ON(rcu_access_pointer(ipc_wwan->sub_netlist[if_id]) != priv))
		goto unlock;

	RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
	/* unregistering includes synchronize_net() */
	unregister_netdevice_queue(dev, head);

unlock:
	mutex_unlock(&ipc_wwan->if_mutex);
}

static const struct wwan_ops iosm_wwan_ops = {
	.priv_size = sizeof(struct iosm_netdev_priv),
	.setup = ipc_wwan_setup,
	.newlink = ipc_wwan_newlink,
	.dellink = ipc_wwan_dellink,
};

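/* RX entry point: called by the IPC layer for each downlink packet; resolve
 * the IP MUX session id to a netdev under RCU and feed the skb to netif_rx().
 */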
int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
		     bool dss, int if_id)
{
	struct sk_buff *skb = skb_arg;
	struct net_device_stats *stats;
	struct iosm_netdev_priv *priv;
	int ret;

	/* Derive the L3 protocol from the IP version nibble */
	if ((skb->data[0] & IOSM_IP_TYPE_MASK) == IOSM_IP_TYPE_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else if ((skb->data[0] & IOSM_IP_TYPE_MASK) ==
		 IOSM_IP_TYPE_IPV6)
		skb->protocol = htons(ETH_P_IPV6);

	skb->pkt_type = PACKET_HOST;

	if (if_id < IP_MUX_SESSION_START ||
	    if_id > IP_MUX_SESSION_END) {
		ret = -EINVAL;
		goto free;
	}

	rcu_read_lock();
	priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
	if (!priv) {
		ret = -EINVAL;
		goto unlock;
	}
	skb->dev = priv->netdev;
	stats = &priv->netdev->stats;
	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	ret = netif_rx(skb);
	skb = NULL;
unlock:
	rcu_read_unlock();
free:
	dev_kfree_skb(skb);
	return ret;
}

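/* Flow control: stop or wake the TX queue of the netdev backing an IP MUX
 * session, as requested by the IPC layer.
 */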
void ipc_wwan_tx_flowctrl(struct iosm_wwan *ipc_wwan, int if_id, bool on)
{
	struct net_device *netdev;
	struct iosm_netdev_priv *priv;
	bool is_tx_blk;

	rcu_read_lock();
	priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
	if (!priv) {
		rcu_read_unlock();
		return;
	}

	netdev = priv->netdev;

	is_tx_blk = netif_queue_stopped(netdev);

	if (on)
		dev_dbg(ipc_wwan->dev, "session id[%d]: flowctrl enable",
			if_id);

	if (on && !is_tx_blk)
		netif_stop_queue(netdev);
	else if (!on && is_tx_blk)
		netif_wake_queue(netdev);
	rcu_read_unlock();
}

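/* Allocate the WWAN instance and register the wwan_ops with the WWAN core,
 * which then creates the netdev for the default IP MUX session.
 */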
struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
{
	struct iosm_wwan *ipc_wwan;

	ipc_wwan = kzalloc(sizeof(*ipc_wwan), GFP_KERNEL);
	if (!ipc_wwan)
		return NULL;

	ipc_wwan->dev = dev;
	ipc_wwan->ipc_imem = ipc_imem;

	/* Initialize the mutex before registering the ops: the WWAN core may
	 * call back into ipc_wwan_newlink() as soon as it creates the default
	 * link, and newlink takes if_mutex.
	 */
	mutex_init(&ipc_wwan->if_mutex);

	/* WWAN core will create a netdev for the default IP MUX channel */
	if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan,
			      IP_MUX_SESSION_DEFAULT)) {
		mutex_destroy(&ipc_wwan->if_mutex);
		kfree(ipc_wwan);
		return NULL;
	}

	return ipc_wwan;
}

void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan)
{
	/* This call will remove all child netdev(s) */
	wwan_unregister_ops(ipc_wwan->dev);

	mutex_destroy(&ipc_wwan->if_mutex);

	kfree(ipc_wwan);
}