// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author:  Sjur Brendeland
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 *  and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
    struct cflayer layer;
    struct list_head list;
    struct net_device *netdev;
    int __percpu *pcpu_refcnt;
    spinlock_t flow_lock;
    struct sk_buff *xoff_skb;
    void (*xoff_skb_dtor)(struct sk_buff *skb);
    bool xoff;
};

struct caif_device_entry_list {
    struct list_head list;
    /* Protects simultaneous deletes in the list */
    struct mutex lock;
};

struct caif_net {
    struct cfcnfg *cfg;
    struct caif_device_entry_list caifdevs;
};

static unsigned int caif_net_id;
static int q_high = 50; /* Percent */

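/* Return the CAIF configuration for the given network namespace. */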
struct cfcnfg *get_cfcnfg(struct net *net)
{
    struct caif_net *caifn;
    caifn = net_generic(net, caif_net_id);
    return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
    struct caif_net *caifn;
    caifn = net_generic(net, caif_net_id);
    return &caifn->caifdevs;
}

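/*
 * Per-CPU reference counting for a device entry: caifd_hold() and
 * caifd_put() only touch the local CPU's counter, while
 * caifd_refcnt_read() sums all CPUs and is used only on the slow
 * teardown paths.
 */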
static void caifd_put(struct caif_device_entry *e)
{
    this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
    this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
    int i, refcnt = 0;
    for_each_possible_cpu(i)
        refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
    return refcnt;
}

/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
    struct caif_device_entry *caifd;

    caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
    if (!caifd)
        return NULL;
    caifd->pcpu_refcnt = alloc_percpu(int);
    if (!caifd->pcpu_refcnt) {
        kfree(caifd);
        return NULL;
    }
    caifd->netdev = dev;
    dev_hold(dev);
    return caifd;
}

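/*
 * Look up the device entry for a net_device. The caller must hold
 * either the RCU read lock or the RTNL lock.
 */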
static struct caif_device_entry *caif_get(struct net_device *dev)
{
    struct caif_device_entry_list *caifdevs =
        caif_device_list(dev_net(dev));
    struct caif_device_entry *caifd;

    list_for_each_entry_rcu(caifd, &caifdevs->list, list,
                lockdep_rtnl_is_held()) {
        if (caifd->netdev == dev)
            return caifd;
    }
    return NULL;
}

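/*
 * skb destructor installed by transmit() while flow is off. It runs
 * when the queued skb is finally freed: it calls the original
 * destructor and signals flow-on up the CAIF stack.
 */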
static void caif_flow_cb(struct sk_buff *skb)
{
    struct caif_device_entry *caifd;
    void (*dtor)(struct sk_buff *skb) = NULL;
    bool send_xoff;

    WARN_ON(skb->dev == NULL);

    rcu_read_lock();
    caifd = caif_get(skb->dev);

    WARN_ON(caifd == NULL);
    if (!caifd) {
        rcu_read_unlock();
        return;
    }

    caifd_hold(caifd);
    rcu_read_unlock();

    spin_lock_bh(&caifd->flow_lock);
    send_xoff = caifd->xoff;
    caifd->xoff = false;
    dtor = caifd->xoff_skb_dtor;

    if (WARN_ON(caifd->xoff_skb != skb))
        skb = NULL;

    caifd->xoff_skb = NULL;
    caifd->xoff_skb_dtor = NULL;

    spin_unlock_bh(&caifd->flow_lock);

    if (dtor && skb)
        dtor(skb);

    if (send_xoff)
        caifd->layer.up->ctrlcmd(caifd->layer.up,
                     _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
                     caifd->layer.id);
    caifd_put(caifd);
}

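/*
 * Transmit a packet from the CAIF stack through the net device. If
 * the TX queue is stopped, or filled beyond the q_high watermark,
 * flow-off is signalled up the stack before the packet is queued.
 */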
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
    int err, high = 0, qlen = 0;
    struct caif_device_entry *caifd =
        container_of(layer, struct caif_device_entry, layer);
    struct sk_buff *skb;
    struct netdev_queue *txq;

    rcu_read_lock_bh();

    skb = cfpkt_tonative(pkt);
    skb->dev = caifd->netdev;
    skb_reset_network_header(skb);
    skb->protocol = htons(ETH_P_CAIF);

    /* Check if we need to handle xoff */
    if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
        goto noxoff;

    if (unlikely(caifd->xoff))
        goto noxoff;

    if (likely(!netif_queue_stopped(caifd->netdev))) {
        struct Qdisc *sch;

        /* If we run with a TX queue, check if the queue is too long */
        txq = netdev_get_tx_queue(skb->dev, 0);
        sch = rcu_dereference_bh(txq->qdisc);
        if (likely(qdisc_is_empty(sch)))
            goto noxoff;

        /* Can check for an explicit qdisc length only when the qdisc
         * is not NOLOCK; otherwise always set flow off.
         */
        high = (caifd->netdev->tx_queue_len * q_high) / 100;
        qlen = sch->q.qlen;
        if (!(sch->flags & TCQ_F_NOLOCK) && likely(qlen < high))
            goto noxoff;
    }

    /* Hold lock while accessing xoff */
    spin_lock_bh(&caifd->flow_lock);
    if (caifd->xoff) {
        spin_unlock_bh(&caifd->flow_lock);
        goto noxoff;
    }

    /*
     * Handle flow off: we do this by temporarily hijacking this
     * skb's destructor function and replacing it with our own
     * flow-on callback. The callback will set flow-on and call
     * the original destructor.
     */

    pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
            netif_queue_stopped(caifd->netdev),
            qlen, high);
    caifd->xoff = true;
    caifd->xoff_skb = skb;
    caifd->xoff_skb_dtor = skb->destructor;
    skb->destructor = caif_flow_cb;
    spin_unlock_bh(&caifd->flow_lock);

    caifd->layer.up->ctrlcmd(caifd->layer.up,
                    _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
                    caifd->layer.id);
noxoff:
    rcu_read_unlock_bh();

    err = dev_queue_xmit(skb);
    if (err > 0)
        err = -EIO;

    return err;
}

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
           struct packet_type *pkttype, struct net_device *orig_dev)
{
    struct cfpkt *pkt;
    struct caif_device_entry *caifd;
    int err;

    pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

    rcu_read_lock();
    caifd = caif_get(dev);

    if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
            !netif_oper_up(caifd->netdev)) {
        rcu_read_unlock();
        kfree_skb(skb);
        return NET_RX_DROP;
    }

    /* Hold reference to netdevice while using CAIF stack */
    caifd_hold(caifd);
    rcu_read_unlock();

    err = caifd->layer.up->receive(caifd->layer.up, pkt);

    /* For -EILSEQ the packet is not freed so free it now */
    if (err == -EILSEQ)
        cfpkt_destroy(pkt);

    /* Release reference to stack upwards */
    caifd_put(caifd);

    if (err != 0)
        err = NET_RX_DROP;
    return err;
}

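/* Handler for ETH_P_CAIF frames, registered with dev_add_pack() at
 * module init.
 */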
static struct packet_type caif_packet_type __read_mostly = {
    .type = cpu_to_be16(ETH_P_CAIF),
    .func = receive,
};

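/* Propagate a flow-on/flow-off indication from the device driver up
 * the CAIF stack.
 */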
static void dev_flowctrl(struct net_device *dev, int on)
{
    struct caif_device_entry *caifd;

    rcu_read_lock();

    caifd = caif_get(dev);
    if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
        rcu_read_unlock();
        return;
    }

    caifd_hold(caifd);
    rcu_read_unlock();

    caifd->layer.up->ctrlcmd(caifd->layer.up,
                 on ?
                 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
                 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
                 caifd->layer.id);
    caifd_put(caifd);
}

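/*
 * Enroll a net device with the CAIF stack: allocate a device entry,
 * add it to the per-namespace device list and register it as a
 * physical layer with the CAIF configuration.
 */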
int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
             struct cflayer *link_support, int head_room,
             struct cflayer **layer,
             int (**rcv_func)(struct sk_buff *, struct net_device *,
                      struct packet_type *,
                      struct net_device *))
{
    struct caif_device_entry *caifd;
    enum cfcnfg_phy_preference pref;
    struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
    struct caif_device_entry_list *caifdevs;
    int res;

    caifdevs = caif_device_list(dev_net(dev));
    caifd = caif_device_alloc(dev);
    if (!caifd)
        return -ENOMEM;
    *layer = &caifd->layer;
    spin_lock_init(&caifd->flow_lock);

    switch (caifdev->link_select) {
    case CAIF_LINK_HIGH_BANDW:
        pref = CFPHYPREF_HIGH_BW;
        break;
    case CAIF_LINK_LOW_LATENCY:
        pref = CFPHYPREF_LOW_LAT;
        break;
    default:
        pref = CFPHYPREF_HIGH_BW;
        break;
    }
    mutex_lock(&caifdevs->lock);
    list_add_rcu(&caifd->list, &caifdevs->list);

    strlcpy(caifd->layer.name, dev->name,
        sizeof(caifd->layer.name));
    caifd->layer.transmit = transmit;
    res = cfcnfg_add_phy_layer(cfg,
                dev,
                &caifd->layer,
                pref,
                link_support,
                caifdev->use_fcs,
                head_room);
    mutex_unlock(&caifdevs->lock);
    if (rcv_func)
        *rcv_func = receive;
    return res;
}
EXPORT_SYMBOL(caif_enroll_dev);

/* Notify CAIF of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
                  void *ptr)
{
    struct net_device *dev = netdev_notifier_info_to_dev(ptr);
    struct caif_device_entry *caifd = NULL;
    struct caif_dev_common *caifdev;
    struct cfcnfg *cfg;
    struct cflayer *layer, *link_support;
    int head_room = 0;
    struct caif_device_entry_list *caifdevs;
    int res;

    cfg = get_cfcnfg(dev_net(dev));
    caifdevs = caif_device_list(dev_net(dev));

    caifd = caif_get(dev);
    if (caifd == NULL && dev->type != ARPHRD_CAIF)
        return 0;

    switch (what) {
    case NETDEV_REGISTER:
        if (caifd != NULL)
            break;

        caifdev = netdev_priv(dev);

        link_support = NULL;
        if (caifdev->use_frag) {
            head_room = 1;
            link_support = cfserl_create(dev->ifindex,
                            caifdev->use_stx);
            if (!link_support) {
                pr_warn("Out of memory\n");
                break;
            }
        }
        res = caif_enroll_dev(dev, caifdev, link_support, head_room,
                &layer, NULL);
        if (res)
            cfserl_release(link_support);
        caifdev->flowctrl = dev_flowctrl;
        break;

    case NETDEV_UP:
        rcu_read_lock();

        caifd = caif_get(dev);
        if (caifd == NULL) {
            rcu_read_unlock();
            break;
        }

        caifd->xoff = false;
        cfcnfg_set_phy_state(cfg, &caifd->layer, true);
        rcu_read_unlock();

        break;

    case NETDEV_DOWN:
        rcu_read_lock();

        caifd = caif_get(dev);
        if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
            rcu_read_unlock();
            return -EINVAL;
        }

        cfcnfg_set_phy_state(cfg, &caifd->layer, false);
        caifd_hold(caifd);
        rcu_read_unlock();

        caifd->layer.up->ctrlcmd(caifd->layer.up,
                     _CAIF_CTRLCMD_PHYIF_DOWN_IND,
                     caifd->layer.id);

        spin_lock_bh(&caifd->flow_lock);

        /*
         * Replace our xoff destructor with the original destructor.
         * We trust that skb->destructor is *always* called before
         * the skb reference becomes invalid. The hijacked SKB
         * destructor takes the flow_lock, so manipulating
         * skb->destructor here should be safe.
         */
        if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
            caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

        caifd->xoff = false;
        caifd->xoff_skb_dtor = NULL;
        caifd->xoff_skb = NULL;

        spin_unlock_bh(&caifd->flow_lock);
        caifd_put(caifd);
        break;

    case NETDEV_UNREGISTER:
        mutex_lock(&caifdevs->lock);

        caifd = caif_get(dev);
        if (caifd == NULL) {
            mutex_unlock(&caifdevs->lock);
            break;
        }
        list_del_rcu(&caifd->list);

        /*
         * NETDEV_UNREGISTER is called repeatedly until all reference
         * counts for the net-device are released. If references to
         * caifd are still held, simply ignore NETDEV_UNREGISTER and
         * wait for the next call to NETDEV_UNREGISTER.
         *
         * If any packets are in flight down the CAIF Stack,
         * cfcnfg_del_phy_layer will return nonzero.
         * If no packets are in flight, the CAIF Stack associated
         * with the net-device un-registering is freed.
         */

        if (caifd_refcnt_read(caifd) != 0 ||
            cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

            pr_info("Wait for device inuse\n");
            /* Re-enroll the device if the CAIF stack is still in use */
            list_add_rcu(&caifd->list, &caifdevs->list);
            mutex_unlock(&caifdevs->lock);
            break;
        }

        synchronize_rcu();
        dev_put(caifd->netdev);
        free_percpu(caifd->pcpu_refcnt);
        kfree(caifd);

        mutex_unlock(&caifdevs->lock);
        break;
    }
    return 0;
}

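/* React to netdevice lifecycle events: register, up, down and
 * unregister.
 */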
static struct notifier_block caif_device_notifier = {
    .notifier_call = caif_device_notify,
    .priority = 0,
};

/* Per-namespace CAIF device handling */
static int caif_init_net(struct net *net)
{
    struct caif_net *caifn = net_generic(net, caif_net_id);
    INIT_LIST_HEAD(&caifn->caifdevs.list);
    mutex_init(&caifn->caifdevs.lock);

    caifn->cfg = cfcnfg_create();
    if (!caifn->cfg)
        return -ENOMEM;

    return 0;
}

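/*
 * Tear down every CAIF device in the namespace on namespace exit,
 * waiting a bounded time for in-flight packets to drain before the
 * entries are freed.
 */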
static void caif_exit_net(struct net *net)
{
    struct caif_device_entry *caifd, *tmp;
    struct caif_device_entry_list *caifdevs =
        caif_device_list(net);
    struct cfcnfg *cfg = get_cfcnfg(net);

    rtnl_lock();
    mutex_lock(&caifdevs->lock);

    list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
        int i = 0;
        list_del_rcu(&caifd->list);
        cfcnfg_set_phy_state(cfg, &caifd->layer, false);

        while (i < 10 &&
            (caifd_refcnt_read(caifd) != 0 ||
            cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

            pr_info("Wait for device inuse\n");
            msleep(250);
            i++;
        }
        synchronize_rcu();
        dev_put(caifd->netdev);
        free_percpu(caifd->pcpu_refcnt);
        kfree(caifd);
    }
    cfcnfg_remove(cfg);

    mutex_unlock(&caifdevs->lock);
    rtnl_unlock();
}

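/* Per-namespace state is allocated on namespace creation and freed
 * on namespace exit.
 */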
static struct pernet_operations caif_net_ops = {
    .init = caif_init_net,
    .exit = caif_exit_net,
    .id   = &caif_net_id,
    .size = sizeof(struct caif_net),
};

/* Initialize CAIF device list */
static int __init caif_device_init(void)
{
    int result;

    result = register_pernet_subsys(&caif_net_ops);

    if (result)
        return result;

    register_netdevice_notifier(&caif_device_notifier);
    dev_add_pack(&caif_packet_type);

    return result;
}

static void __exit caif_device_exit(void)
{
    unregister_netdevice_notifier(&caif_device_notifier);
    dev_remove_pack(&caif_packet_type);
    unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);