0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * Generic PPP layer for Linux.
0004  *
0005  * Copyright 1999-2002 Paul Mackerras.
0006  *
0007  * The generic PPP layer handles the PPP network interfaces, the
0008  * /dev/ppp device, packet and VJ compression, and multilink.
0009  * It talks to PPP `channels' via the interface defined in
0010  * include/linux/ppp_channel.h.  Channels provide the basic means for
0011  * sending and receiving PPP frames on some kind of communications
0012  * channel.
0013  *
0014  * Part of the code in this driver was inspired by the old async-only
0015  * PPP driver, written by Michael Callahan and Al Longyear, and
0016  * subsequently hacked by Paul Mackerras.
0017  *
0018  * ==FILEVERSION 20041108==
0019  */
0020 
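/*
 * For orientation: a channel driver hands frames to this layer via the
 * ppp_channel interface.  A hedged sketch of the registration shape
 * (the ops field names are the real ones from
 * include/linux/ppp_channel.h; the my_* functions are hypothetical):
 *
 *	static const struct ppp_channel_ops my_ops = {
 *		.start_xmit = my_start_xmit,
 *		.ioctl      = my_ioctl,
 *	};
 *
 *	chan->ops = &my_ops;
 *	ppp_register_channel(chan);
 */
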
0021 #include <linux/module.h>
0022 #include <linux/kernel.h>
0023 #include <linux/sched/signal.h>
0024 #include <linux/kmod.h>
0025 #include <linux/init.h>
0026 #include <linux/list.h>
0027 #include <linux/idr.h>
0028 #include <linux/netdevice.h>
0029 #include <linux/poll.h>
0030 #include <linux/ppp_defs.h>
0031 #include <linux/filter.h>
0032 #include <linux/ppp-ioctl.h>
0033 #include <linux/ppp_channel.h>
0034 #include <linux/ppp-comp.h>
0035 #include <linux/skbuff.h>
0036 #include <linux/rtnetlink.h>
0037 #include <linux/if_arp.h>
0038 #include <linux/ip.h>
0039 #include <linux/tcp.h>
0040 #include <linux/spinlock.h>
0041 #include <linux/rwsem.h>
0042 #include <linux/stddef.h>
0043 #include <linux/device.h>
0044 #include <linux/mutex.h>
0045 #include <linux/slab.h>
0046 #include <linux/file.h>
0047 #include <asm/unaligned.h>
0048 #include <net/slhc_vj.h>
0049 #include <linux/atomic.h>
0050 #include <linux/refcount.h>
0051 
0052 #include <linux/nsproxy.h>
0053 #include <net/net_namespace.h>
0054 #include <net/netns/generic.h>
0055 
0056 #define PPP_VERSION "2.4.2"
0057 
0058 /*
0059  * Network protocols we support.
0060  */
0061 #define NP_IP   0       /* Internet Protocol V4 */
0062 #define NP_IPV6 1       /* Internet Protocol V6 */
0063 #define NP_IPX  2       /* IPX protocol */
0064 #define NP_AT   3       /* Appletalk protocol */
0065 #define NP_MPLS_UC 4        /* MPLS unicast */
0066 #define NP_MPLS_MC 5        /* MPLS multicast */
0067 #define NUM_NP  6       /* Number of NPs. */
0068 
0069 #define MPHDRLEN    6   /* multilink protocol header length */
0070 #define MPHDRLEN_SSN    4   /* ditto with short sequence numbers */
0071 
0072 #define PPP_PROTO_LEN   2
0073 
0074 /*
0075  * An instance of /dev/ppp can be associated with either a ppp
0076  * interface unit or a ppp channel.  In both cases, file->private_data
0077  * points to one of these.
0078  */
0079 struct ppp_file {
0080     enum {
0081         INTERFACE=1, CHANNEL
0082     }       kind;
0083     struct sk_buff_head xq;     /* pppd transmit queue */
0084     struct sk_buff_head rq;     /* receive queue for pppd */
0085     wait_queue_head_t rwait;    /* for poll on reading /dev/ppp */
0086     refcount_t  refcnt;     /* # refs (incl /dev/ppp attached) */
0087     int     hdrlen;     /* space to leave for headers */
0088     int     index;      /* interface unit / channel number */
0089     int     dead;       /* unit/channel has been shut down */
0090 };
0091 
0092 #define PF_TO_X(pf, X)      container_of(pf, X, file)
0093 
0094 #define PF_TO_PPP(pf)       PF_TO_X(pf, struct ppp)
0095 #define PF_TO_CHANNEL(pf)   PF_TO_X(pf, struct channel)
0096 
0097 /*
0098  * Data structure to hold primary network stats for which
0099  * we want to use 64 bit storage.  Other network stats
0100  * are stored in dev->stats of the ppp structure.
0101  */
0102 struct ppp_link_stats {
0103     u64 rx_packets;
0104     u64 tx_packets;
0105     u64 rx_bytes;
0106     u64 tx_bytes;
0107 };
0108 
0109 /*
0110  * Data structure describing one ppp unit.
0111  * A ppp unit corresponds to a ppp network interface device
0112  * and represents a multilink bundle.
0113  * It can have 0 or more ppp channels connected to it.
0114  */
0115 struct ppp {
0116     struct ppp_file file;       /* stuff for read/write/poll 0 */
0117     struct file *owner;     /* file that owns this unit 48 */
0118     struct list_head channels;  /* list of attached channels 4c */
0119     int     n_channels; /* how many channels are attached 54 */
0120     spinlock_t  rlock;      /* lock for receive side 58 */
0121     spinlock_t  wlock;      /* lock for transmit side 5c */
0122     int __percpu    *xmit_recursion; /* xmit recursion detect */
0123     int     mru;        /* max receive unit 60 */
0124     unsigned int    flags;      /* control bits 64 */
0125     unsigned int    xstate;     /* transmit state bits 68 */
0126     unsigned int    rstate;     /* receive state bits 6c */
0127     int     debug;      /* debug flags 70 */
0128     struct slcompress *vj;      /* state for VJ header compression */
0129     enum NPmode npmode[NUM_NP]; /* what to do with each net proto 78 */
0130     struct sk_buff  *xmit_pending;  /* a packet ready to go out 88 */
0131     struct compressor *xcomp;   /* transmit packet compressor 8c */
0132     void        *xc_state;  /* its internal state 90 */
0133     struct compressor *rcomp;   /* receive decompressor 94 */
0134     void        *rc_state;  /* its internal state 98 */
0135     unsigned long   last_xmit;  /* jiffies when last pkt sent 9c */
0136     unsigned long   last_recv;  /* jiffies when last pkt rcvd a0 */
0137     struct net_device *dev;     /* network interface device a4 */
0138     int     closing;    /* is device closing down? a8 */
0139 #ifdef CONFIG_PPP_MULTILINK
0140     int     nxchan;     /* next channel to send something on */
0141     u32     nxseq;      /* next sequence number to send */
0142     int     mrru;       /* MP: max reconst. receive unit */
0143     u32     nextseq;    /* MP: seq no of next packet */
0144     u32     minseq;     /* MP: min of most recent seqnos */
0145     struct sk_buff_head mrq;    /* MP: receive reconstruction queue */
0146 #endif /* CONFIG_PPP_MULTILINK */
0147 #ifdef CONFIG_PPP_FILTER
0148     struct bpf_prog *pass_filter;   /* filter for packets to pass */
0149     struct bpf_prog *active_filter; /* filter for pkts to reset idle */
0150 #endif /* CONFIG_PPP_FILTER */
0151     struct net  *ppp_net;   /* the net we belong to */
0152     struct ppp_link_stats stats64;  /* 64 bit network stats */
0153 };
0154 
0155 /*
0156  * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
0157  * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
0158  * SC_MUST_COMP
0159  * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
0160  * Bits in xstate: SC_COMP_RUN
0161  */
0162 #define SC_FLAG_BITS    (SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
0163              |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
0164              |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)
0165 
0166 /*
0167  * Private data structure for each channel.
0168  * This includes the data structure used for multilink.
0169  */
0170 struct channel {
0171     struct ppp_file file;       /* stuff for read/write/poll */
0172     struct list_head list;      /* link in all/new_channels list */
0173     struct ppp_channel *chan;   /* public channel data structure */
0174     struct rw_semaphore chan_sem;   /* protects `chan' during chan ioctl */
0175     spinlock_t  downl;      /* protects `chan', file.xq dequeue */
0176     struct ppp  *ppp;       /* ppp unit we're connected to */
0177     struct net  *chan_net;  /* the net channel belongs to */
0178     netns_tracker   ns_tracker;
0179     struct list_head clist;     /* link in list of channels per unit */
0180     rwlock_t    upl;        /* protects `ppp' and 'bridge' */
0181     struct channel __rcu *bridge;   /* "bridged" ppp channel */
0182 #ifdef CONFIG_PPP_MULTILINK
0183     u8      avail;      /* flag used in multilink stuff */
0184     u8      had_frag;   /* >= 1 fragments have been sent */
0185     u32     lastseq;    /* MP: last sequence # received */
0186     int     speed;      /* speed of the corresponding ppp channel */
0187 #endif /* CONFIG_PPP_MULTILINK */
0188 };
0189 
0190 struct ppp_config {
0191     struct file *file;
0192     s32 unit;
0193     bool ifname_is_set;
0194 };
0195 
0196 /*
0197  * SMP locking issues:
0198  * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
0199  * list and the ppp.n_channels field; you need to take both locks
0200  * before you modify them.
0201  * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
0202  * channel.downl.
0203  */
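/*
 * A sketch of the full acquisition order implied above (most call
 * sites take only the subset they need; compare ppp_connect_channel(),
 * declared below):
 *
 *	write_lock_bh(&pch->upl);
 *	spin_lock_bh(&ppp->wlock);
 *	spin_lock_bh(&ppp->rlock);
 *	spin_lock_bh(&pch->downl);
 *	... critical section ...
 *	spin_unlock_bh(&pch->downl);
 *	spin_unlock_bh(&ppp->rlock);
 *	spin_unlock_bh(&ppp->wlock);
 *	write_unlock_bh(&pch->upl);
 */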
0204 
0205 static DEFINE_MUTEX(ppp_mutex);
0206 static atomic_t ppp_unit_count = ATOMIC_INIT(0);
0207 static atomic_t channel_count = ATOMIC_INIT(0);
0208 
0209 /* per-net private data for this module */
0210 static unsigned int ppp_net_id __read_mostly;
0211 struct ppp_net {
0212     /* units to ppp mapping */
0213     struct idr units_idr;
0214 
0215     /*
0216      * all_ppp_mutex protects the units_idr mapping.
0217      * It also ensures that finding a ppp unit in the units_idr
0218      * map and updating its file.refcnt field is atomic.
0219      */
0220     struct mutex all_ppp_mutex;
0221 
0222     /* channels */
0223     struct list_head all_channels;
0224     struct list_head new_channels;
0225     int last_channel_index;
0226 
0227     /*
0228      * all_channels_lock protects all_channels and
0229      * last_channel_index, and the atomicity of finding
0230      * a channel and updating its file.refcnt field.
0231      */
0232     spinlock_t all_channels_lock;
0233 };
0234 
0235 /* Get the PPP protocol number from a skb */
0236 #define PPP_PROTO(skb)  get_unaligned_be16((skb)->data)
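/* E.g. an IPv4 frame at this layer begins 0x00 0x21, so PPP_PROTO(skb)
 * evaluates to PPP_IP (0x21); frames here carry no address/control
 * bytes, only the 2-byte protocol followed by the payload.
 */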
0237 
0238 /* We limit the length of ppp->file.rq to this (arbitrary) value */
0239 #define PPP_MAX_RQLEN   32
0240 
0241 /*
0242  * Maximum number of multilink fragments queued up.
0243  * This has to be large enough to cope with the maximum latency of
0244  * the slowest channel relative to the others.  Strictly it should
0245  * depend on the number of channels and their characteristics.
0246  */
0247 #define PPP_MP_MAX_QLEN 128
0248 
0249 /* Multilink header bits. */
0250 #define B   0x80        /* this fragment begins a packet */
0251 #define E   0x40        /* this fragment ends a packet */
0252 
0253 /* Compare multilink sequence numbers (assumed to be 32 bits wide) */
0254 #define seq_before(a, b)    ((s32)((a) - (b)) < 0)
0255 #define seq_after(a, b)     ((s32)((a) - (b)) > 0)
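/* The s32 cast makes these comparisons wraparound-safe: e.g.
 * seq_before(0xfffffffe, 1) is true because (s32)(0xfffffffe - 1) is
 * negative.
 */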
0256 
0257 /* Prototypes. */
0258 static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
0259             struct file *file, unsigned int cmd, unsigned long arg);
0260 static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
0261 static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
0262 static void ppp_push(struct ppp *ppp);
0263 static void ppp_channel_push(struct channel *pch);
0264 static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
0265                   struct channel *pch);
0266 static void ppp_receive_error(struct ppp *ppp);
0267 static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
0268 static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
0269                         struct sk_buff *skb);
0270 #ifdef CONFIG_PPP_MULTILINK
0271 static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
0272                 struct channel *pch);
0273 static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
0274 static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
0275 static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
0276 #endif /* CONFIG_PPP_MULTILINK */
0277 static int ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data);
0278 static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
0279 static void ppp_ccp_closed(struct ppp *ppp);
0280 static struct compressor *find_compressor(int type);
0281 static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
0282 static int ppp_create_interface(struct net *net, struct file *file, int *unit);
0283 static void init_ppp_file(struct ppp_file *pf, int kind);
0284 static void ppp_destroy_interface(struct ppp *ppp);
0285 static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
0286 static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
0287 static int ppp_connect_channel(struct channel *pch, int unit);
0288 static int ppp_disconnect_channel(struct channel *pch);
0289 static void ppp_destroy_channel(struct channel *pch);
0290 static int unit_get(struct idr *p, void *ptr, int min);
0291 static int unit_set(struct idr *p, void *ptr, int n);
0292 static void unit_put(struct idr *p, int n);
0293 static void *unit_find(struct idr *p, int n);
0294 static void ppp_setup(struct net_device *dev);
0295 
0296 static const struct net_device_ops ppp_netdev_ops;
0297 
0298 static struct class *ppp_class;
0299 
0300 /* per net-namespace data */
0301 static inline struct ppp_net *ppp_pernet(struct net *net)
0302 {
0303     return net_generic(net, ppp_net_id);
0304 }
0305 
0306 /* Translates a PPP protocol number to a NP index (NP == network protocol) */
0307 static inline int proto_to_npindex(int proto)
0308 {
0309     switch (proto) {
0310     case PPP_IP:
0311         return NP_IP;
0312     case PPP_IPV6:
0313         return NP_IPV6;
0314     case PPP_IPX:
0315         return NP_IPX;
0316     case PPP_AT:
0317         return NP_AT;
0318     case PPP_MPLS_UC:
0319         return NP_MPLS_UC;
0320     case PPP_MPLS_MC:
0321         return NP_MPLS_MC;
0322     }
0323     return -EINVAL;
0324 }
0325 
0326 /* Translates an NP index into a PPP protocol number */
0327 static const int npindex_to_proto[NUM_NP] = {
0328     PPP_IP,
0329     PPP_IPV6,
0330     PPP_IPX,
0331     PPP_AT,
0332     PPP_MPLS_UC,
0333     PPP_MPLS_MC,
0334 };
0335 
0336 /* Translates an ethertype into an NP index */
0337 static inline int ethertype_to_npindex(int ethertype)
0338 {
0339     switch (ethertype) {
0340     case ETH_P_IP:
0341         return NP_IP;
0342     case ETH_P_IPV6:
0343         return NP_IPV6;
0344     case ETH_P_IPX:
0345         return NP_IPX;
0346     case ETH_P_PPPTALK:
0347     case ETH_P_ATALK:
0348         return NP_AT;
0349     case ETH_P_MPLS_UC:
0350         return NP_MPLS_UC;
0351     case ETH_P_MPLS_MC:
0352         return NP_MPLS_MC;
0353     }
0354     return -1;
0355 }
0356 
0357 /* Translates an NP index into an ethertype */
0358 static const int npindex_to_ethertype[NUM_NP] = {
0359     ETH_P_IP,
0360     ETH_P_IPV6,
0361     ETH_P_IPX,
0362     ETH_P_PPPTALK,
0363     ETH_P_MPLS_UC,
0364     ETH_P_MPLS_MC,
0365 };
0366 
0367 /*
0368  * Locking shorthand.
0369  */
0370 #define ppp_xmit_lock(ppp)  spin_lock_bh(&(ppp)->wlock)
0371 #define ppp_xmit_unlock(ppp)    spin_unlock_bh(&(ppp)->wlock)
0372 #define ppp_recv_lock(ppp)  spin_lock_bh(&(ppp)->rlock)
0373 #define ppp_recv_unlock(ppp)    spin_unlock_bh(&(ppp)->rlock)
0374 #define ppp_lock(ppp)       do { ppp_xmit_lock(ppp); \
0375                      ppp_recv_lock(ppp); } while (0)
0376 #define ppp_unlock(ppp)     do { ppp_recv_unlock(ppp); \
0377                      ppp_xmit_unlock(ppp); } while (0)
0378 
0379 /*
0380  * /dev/ppp device routines.
0381  * The /dev/ppp device is used by pppd to control the ppp unit.
0382  * It supports the read, write, ioctl and poll functions.
0383  * Open instances of /dev/ppp can be in one of three states:
0384  * unattached, attached to a ppp unit, or attached to a ppp channel.
0385  */
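/*
 * A hedged userspace sketch of those transitions (mirrors what pppd
 * does; fd names hypothetical, error handling omitted):
 *
 *	int fd = open("/dev/ppp", O_RDWR);	-- unattached
 *	int unit = -1;				-- -1 lets the kernel pick
 *	ioctl(fd, PPPIOCNEWUNIT, &unit);	-- attached to a new unit
 *
 * PPPIOCATTACH and PPPIOCATTCHAN attach to an existing unit or channel
 * instead; all three are handled in ppp_unattached_ioctl() below.
 */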
0386 static int ppp_open(struct inode *inode, struct file *file)
0387 {
0388     /*
0389      * This could (should?) be enforced by the permissions on /dev/ppp.
0390      */
0391     if (!ns_capable(file->f_cred->user_ns, CAP_NET_ADMIN))
0392         return -EPERM;
0393     return 0;
0394 }
0395 
0396 static int ppp_release(struct inode *unused, struct file *file)
0397 {
0398     struct ppp_file *pf = file->private_data;
0399     struct ppp *ppp;
0400 
0401     if (pf) {
0402         file->private_data = NULL;
0403         if (pf->kind == INTERFACE) {
0404             ppp = PF_TO_PPP(pf);
0405             rtnl_lock();
0406             if (file == ppp->owner)
0407                 unregister_netdevice(ppp->dev);
0408             rtnl_unlock();
0409         }
0410         if (refcount_dec_and_test(&pf->refcnt)) {
0411             switch (pf->kind) {
0412             case INTERFACE:
0413                 ppp_destroy_interface(PF_TO_PPP(pf));
0414                 break;
0415             case CHANNEL:
0416                 ppp_destroy_channel(PF_TO_CHANNEL(pf));
0417                 break;
0418             }
0419         }
0420     }
0421     return 0;
0422 }
0423 
0424 static ssize_t ppp_read(struct file *file, char __user *buf,
0425             size_t count, loff_t *ppos)
0426 {
0427     struct ppp_file *pf = file->private_data;
0428     DECLARE_WAITQUEUE(wait, current);
0429     ssize_t ret;
0430     struct sk_buff *skb = NULL;
0431     struct iovec iov;
0432     struct iov_iter to;
0433 
0434     ret = count;
0435 
0436     if (!pf)
0437         return -ENXIO;
0438     add_wait_queue(&pf->rwait, &wait);
0439     for (;;) {
0440         set_current_state(TASK_INTERRUPTIBLE);
0441         skb = skb_dequeue(&pf->rq);
0442         if (skb)
0443             break;
0444         ret = 0;
0445         if (pf->dead)
0446             break;
0447         if (pf->kind == INTERFACE) {
0448             /*
0449              * Return 0 (EOF) on an interface that has no
0450              * channels connected, unless it is looping
0451              * network traffic (demand mode).
0452              */
0453             struct ppp *ppp = PF_TO_PPP(pf);
0454 
0455             ppp_recv_lock(ppp);
0456             if (ppp->n_channels == 0 &&
0457                 (ppp->flags & SC_LOOP_TRAFFIC) == 0) {
0458                 ppp_recv_unlock(ppp);
0459                 break;
0460             }
0461             ppp_recv_unlock(ppp);
0462         }
0463         ret = -EAGAIN;
0464         if (file->f_flags & O_NONBLOCK)
0465             break;
0466         ret = -ERESTARTSYS;
0467         if (signal_pending(current))
0468             break;
0469         schedule();
0470     }
0471     set_current_state(TASK_RUNNING);
0472     remove_wait_queue(&pf->rwait, &wait);
0473 
0474     if (!skb)
0475         goto out;
0476 
0477     ret = -EOVERFLOW;
0478     if (skb->len > count)
0479         goto outf;
0480     ret = -EFAULT;
0481     iov.iov_base = buf;
0482     iov.iov_len = count;
0483     iov_iter_init(&to, READ, &iov, 1, count);
0484     if (skb_copy_datagram_iter(skb, 0, &to, skb->len))
0485         goto outf;
0486     ret = skb->len;
0487 
0488  outf:
0489     kfree_skb(skb);
0490  out:
0491     return ret;
0492 }
0493 
0494 static ssize_t ppp_write(struct file *file, const char __user *buf,
0495              size_t count, loff_t *ppos)
0496 {
0497     struct ppp_file *pf = file->private_data;
0498     struct sk_buff *skb;
0499     ssize_t ret;
0500 
0501     if (!pf)
0502         return -ENXIO;
0503     /* All PPP packets should start with the 2-byte protocol */
0504     if (count < PPP_PROTO_LEN)
0505         return -EINVAL;
0506     ret = -ENOMEM;
0507     skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
0508     if (!skb)
0509         goto out;
0510     skb_reserve(skb, pf->hdrlen);
0511     ret = -EFAULT;
0512     if (copy_from_user(skb_put(skb, count), buf, count)) {
0513         kfree_skb(skb);
0514         goto out;
0515     }
0516 
0517     switch (pf->kind) {
0518     case INTERFACE:
0519         ppp_xmit_process(PF_TO_PPP(pf), skb);
0520         break;
0521     case CHANNEL:
0522         skb_queue_tail(&pf->xq, skb);
0523         ppp_channel_push(PF_TO_CHANNEL(pf));
0524         break;
0525     }
0526 
0527     ret = count;
0528 
0529  out:
0530     return ret;
0531 }
0532 
0533 /* No kernel lock - fine */
0534 static __poll_t ppp_poll(struct file *file, poll_table *wait)
0535 {
0536     struct ppp_file *pf = file->private_data;
0537     __poll_t mask;
0538 
0539     if (!pf)
0540         return 0;
0541     poll_wait(file, &pf->rwait, wait);
0542     mask = EPOLLOUT | EPOLLWRNORM;
0543     if (skb_peek(&pf->rq))
0544         mask |= EPOLLIN | EPOLLRDNORM;
0545     if (pf->dead)
0546         mask |= EPOLLHUP;
0547     else if (pf->kind == INTERFACE) {
0548         /* see comment in ppp_read */
0549         struct ppp *ppp = PF_TO_PPP(pf);
0550 
0551         ppp_recv_lock(ppp);
0552         if (ppp->n_channels == 0 &&
0553             (ppp->flags & SC_LOOP_TRAFFIC) == 0)
0554             mask |= EPOLLIN | EPOLLRDNORM;
0555         ppp_recv_unlock(ppp);
0556     }
0557 
0558     return mask;
0559 }
0560 
0561 #ifdef CONFIG_PPP_FILTER
0562 static struct bpf_prog *get_filter(struct sock_fprog *uprog)
0563 {
0564     struct sock_fprog_kern fprog;
0565     struct bpf_prog *res = NULL;
0566     int err;
0567 
0568     if (!uprog->len)
0569         return NULL;
0570 
0571     /* uprog->len is unsigned short, so no overflow here */
0572     fprog.len = uprog->len;
0573     fprog.filter = memdup_user(uprog->filter,
0574                    uprog->len * sizeof(struct sock_filter));
0575     if (IS_ERR(fprog.filter))
0576         return ERR_CAST(fprog.filter);
0577 
0578     err = bpf_prog_create(&res, &fprog);
0579     kfree(fprog.filter);
0580 
0581     return err ? ERR_PTR(err) : res;
0582 }
0583 
0584 static struct bpf_prog *ppp_get_filter(struct sock_fprog __user *p)
0585 {
0586     struct sock_fprog uprog;
0587 
0588     if (copy_from_user(&uprog, p, sizeof(struct sock_fprog)))
0589         return ERR_PTR(-EFAULT);
0590     return get_filter(&uprog);
0591 }
0592 
0593 #ifdef CONFIG_COMPAT
0594 struct sock_fprog32 {
0595     unsigned short len;
0596     compat_caddr_t filter;
0597 };
0598 
0599 #define PPPIOCSPASS32       _IOW('t', 71, struct sock_fprog32)
0600 #define PPPIOCSACTIVE32     _IOW('t', 70, struct sock_fprog32)
0601 
0602 static struct bpf_prog *compat_ppp_get_filter(struct sock_fprog32 __user *p)
0603 {
0604     struct sock_fprog32 uprog32;
0605     struct sock_fprog uprog;
0606 
0607     if (copy_from_user(&uprog32, p, sizeof(struct sock_fprog32)))
0608         return ERR_PTR(-EFAULT);
0609     uprog.len = uprog32.len;
0610     uprog.filter = compat_ptr(uprog32.filter);
0611     return get_filter(&uprog);
0612 }
0613 #endif
0614 #endif
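/*
 * Userspace installs these filters with the PPPIOCSPASS and
 * PPPIOCSACTIVE ioctls on a /dev/ppp descriptor attached to a unit,
 * passing a classic-BPF program.  A minimal accept-everything sketch
 * (error handling omitted):
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = insns };
 *	ioctl(fd, PPPIOCSPASS, &prog);
 */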
0615 
0616 /* Bridge one PPP channel to another.
0617  * When two channels are bridged, ppp_input on one channel is redirected to
0618  * the other's ops->start_xmit handler.
0619  * In order to safely bridge channels we must reject channels which are already
0620  * part of a bridge instance, or which form part of an existing unit.
0621  * Once successfully bridged, each channel holds a reference on the other
0622  * to prevent it being freed while the bridge is extant.
0623  */
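/*
 * A hedged sketch of the userspace sequence (fd names hypothetical,
 * error handling omitted).  PPPIOCGCHAN is implemented by the backing
 * channel drivers, not here:
 *
 *	int idx_a, idx_b;
 *	ioctl(tty_a, PPPIOCGCHAN, &idx_a);	-- channel indices from the
 *	ioctl(tty_b, PPPIOCGCHAN, &idx_b);	-- channel drivers
 *	ioctl(ppp_a, PPPIOCATTCHAN, &idx_a);	-- /dev/ppp fd onto channel A
 *	ioctl(ppp_a, PPPIOCBRIDGECHAN, &idx_b);	-- bridge A to B
 */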
0624 static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
0625 {
0626     write_lock_bh(&pch->upl);
0627     if (pch->ppp ||
0628         rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl))) {
0629         write_unlock_bh(&pch->upl);
0630         return -EALREADY;
0631     }
0632     refcount_inc(&pchb->file.refcnt);
0633     rcu_assign_pointer(pch->bridge, pchb);
0634     write_unlock_bh(&pch->upl);
0635 
0636     write_lock_bh(&pchb->upl);
0637     if (pchb->ppp ||
0638         rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl))) {
0639         write_unlock_bh(&pchb->upl);
0640         goto err_unset;
0641     }
0642     refcount_inc(&pch->file.refcnt);
0643     rcu_assign_pointer(pchb->bridge, pch);
0644     write_unlock_bh(&pchb->upl);
0645 
0646     return 0;
0647 
0648 err_unset:
0649     write_lock_bh(&pch->upl);
0650     /* Re-read pch->bridge with upl held in case it was modified concurrently */
0651     pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
0652     RCU_INIT_POINTER(pch->bridge, NULL);
0653     write_unlock_bh(&pch->upl);
0654     synchronize_rcu();
0655 
0656     if (pchb)
0657         if (refcount_dec_and_test(&pchb->file.refcnt))
0658             ppp_destroy_channel(pchb);
0659 
0660     return -EALREADY;
0661 }
0662 
0663 static int ppp_unbridge_channels(struct channel *pch)
0664 {
0665     struct channel *pchb, *pchbb;
0666 
0667     write_lock_bh(&pch->upl);
0668     pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
0669     if (!pchb) {
0670         write_unlock_bh(&pch->upl);
0671         return -EINVAL;
0672     }
0673     RCU_INIT_POINTER(pch->bridge, NULL);
0674     write_unlock_bh(&pch->upl);
0675 
0676     /* Only modify pchb if pchb->bridge points back to pch.
0677      * If not, it implies that there has been a race unbridging (and possibly
0678      * even rebridging) pchb.  We should leave pchb alone to avoid either a
0679      * refcount underflow, or breaking another established bridge instance.
0680      */
0681     write_lock_bh(&pchb->upl);
0682     pchbb = rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl));
0683     if (pchbb == pch)
0684         RCU_INIT_POINTER(pchb->bridge, NULL);
0685     write_unlock_bh(&pchb->upl);
0686 
0687     synchronize_rcu();
0688 
0689     if (pchbb == pch)
0690         if (refcount_dec_and_test(&pch->file.refcnt))
0691             ppp_destroy_channel(pch);
0692 
0693     if (refcount_dec_and_test(&pchb->file.refcnt))
0694         ppp_destroy_channel(pchb);
0695 
0696     return 0;
0697 }
0698 
0699 static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
0700 {
0701     struct ppp_file *pf;
0702     struct ppp *ppp;
0703     int err = -EFAULT, val, val2, i;
0704     struct ppp_idle32 idle32;
0705     struct ppp_idle64 idle64;
0706     struct npioctl npi;
0707     int unit, cflags;
0708     struct slcompress *vj;
0709     void __user *argp = (void __user *)arg;
0710     int __user *p = argp;
0711 
0712     mutex_lock(&ppp_mutex);
0713 
0714     pf = file->private_data;
0715     if (!pf) {
0716         err = ppp_unattached_ioctl(current->nsproxy->net_ns,
0717                        pf, file, cmd, arg);
0718         goto out;
0719     }
0720 
0721     if (cmd == PPPIOCDETACH) {
0722         /*
0723          * PPPIOCDETACH is no longer supported as it was heavily broken,
0724          * and is only known to have been used by pppd older than
0725          * ppp-2.4.2 (released November 2003).
0726          */
0727         pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n",
0728                  current->comm, current->pid);
0729         err = -EINVAL;
0730         goto out;
0731     }
0732 
0733     if (pf->kind == CHANNEL) {
0734         struct channel *pch, *pchb;
0735         struct ppp_channel *chan;
0736         struct ppp_net *pn;
0737 
0738         pch = PF_TO_CHANNEL(pf);
0739 
0740         switch (cmd) {
0741         case PPPIOCCONNECT:
0742             if (get_user(unit, p))
0743                 break;
0744             err = ppp_connect_channel(pch, unit);
0745             break;
0746 
0747         case PPPIOCDISCONN:
0748             err = ppp_disconnect_channel(pch);
0749             break;
0750 
0751         case PPPIOCBRIDGECHAN:
0752             if (get_user(unit, p))
0753                 break;
0754             err = -ENXIO;
0755             pn = ppp_pernet(current->nsproxy->net_ns);
0756             spin_lock_bh(&pn->all_channels_lock);
0757             pchb = ppp_find_channel(pn, unit);
0758             /* Hold a reference to prevent pchb being freed while
0759              * we establish the bridge.
0760              */
0761             if (pchb)
0762                 refcount_inc(&pchb->file.refcnt);
0763             spin_unlock_bh(&pn->all_channels_lock);
0764             if (!pchb)
0765                 break;
0766             err = ppp_bridge_channels(pch, pchb);
0767             /* Drop earlier refcount now bridge establishment is complete */
0768             if (refcount_dec_and_test(&pchb->file.refcnt))
0769                 ppp_destroy_channel(pchb);
0770             break;
0771 
0772         case PPPIOCUNBRIDGECHAN:
0773             err = ppp_unbridge_channels(pch);
0774             break;
0775 
0776         default:
0777             down_read(&pch->chan_sem);
0778             chan = pch->chan;
0779             err = -ENOTTY;
0780             if (chan && chan->ops->ioctl)
0781                 err = chan->ops->ioctl(chan, cmd, arg);
0782             up_read(&pch->chan_sem);
0783         }
0784         goto out;
0785     }
0786 
0787     if (pf->kind != INTERFACE) {
0788         /* can't happen */
0789         pr_err("PPP: not interface or channel??\n");
0790         err = -EINVAL;
0791         goto out;
0792     }
0793 
0794     ppp = PF_TO_PPP(pf);
0795     switch (cmd) {
0796     case PPPIOCSMRU:
0797         if (get_user(val, p))
0798             break;
0799         ppp->mru = val;
0800         err = 0;
0801         break;
0802 
0803     case PPPIOCSFLAGS:
0804         if (get_user(val, p))
0805             break;
0806         ppp_lock(ppp);
0807         cflags = ppp->flags & ~val;
0808 #ifdef CONFIG_PPP_MULTILINK
0809         if (!(ppp->flags & SC_MULTILINK) && (val & SC_MULTILINK))
0810             ppp->nextseq = 0;
0811 #endif
0812         ppp->flags = val & SC_FLAG_BITS;
0813         ppp_unlock(ppp);
0814         if (cflags & SC_CCP_OPEN)
0815             ppp_ccp_closed(ppp);
0816         err = 0;
0817         break;
0818 
0819     case PPPIOCGFLAGS:
0820         val = ppp->flags | ppp->xstate | ppp->rstate;
0821         if (put_user(val, p))
0822             break;
0823         err = 0;
0824         break;
0825 
0826     case PPPIOCSCOMPRESS:
0827     {
0828         struct ppp_option_data data;
0829         if (copy_from_user(&data, argp, sizeof(data)))
0830             err = -EFAULT;
0831         else
0832             err = ppp_set_compress(ppp, &data);
0833         break;
0834     }
0835     case PPPIOCGUNIT:
0836         if (put_user(ppp->file.index, p))
0837             break;
0838         err = 0;
0839         break;
0840 
0841     case PPPIOCSDEBUG:
0842         if (get_user(val, p))
0843             break;
0844         ppp->debug = val;
0845         err = 0;
0846         break;
0847 
0848     case PPPIOCGDEBUG:
0849         if (put_user(ppp->debug, p))
0850             break;
0851         err = 0;
0852         break;
0853 
0854     case PPPIOCGIDLE32:
0855         idle32.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
0856         idle32.recv_idle = (jiffies - ppp->last_recv) / HZ;
0857         if (copy_to_user(argp, &idle32, sizeof(idle32)))
0858             break;
0859         err = 0;
0860         break;
0861 
0862     case PPPIOCGIDLE64:
0863         idle64.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
0864         idle64.recv_idle = (jiffies - ppp->last_recv) / HZ;
0865         if (copy_to_user(argp, &idle64, sizeof(idle64)))
0866             break;
0867         err = 0;
0868         break;
0869 
0870     case PPPIOCSMAXCID:
0871         if (get_user(val, p))
0872             break;
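        /* Low 16 bits of val: max transmit slot id; optional high 16
         * bits: max receive slot id (defaults to 15).  slhc_init()
         * below takes slot counts, hence the +1. */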
0873         val2 = 15;
0874         if ((val >> 16) != 0) {
0875             val2 = val >> 16;
0876             val &= 0xffff;
0877         }
0878         vj = slhc_init(val2+1, val+1);
0879         if (IS_ERR(vj)) {
0880             err = PTR_ERR(vj);
0881             break;
0882         }
0883         ppp_lock(ppp);
0884         if (ppp->vj)
0885             slhc_free(ppp->vj);
0886         ppp->vj = vj;
0887         ppp_unlock(ppp);
0888         err = 0;
0889         break;
0890 
0891     case PPPIOCGNPMODE:
0892     case PPPIOCSNPMODE:
0893         if (copy_from_user(&npi, argp, sizeof(npi)))
0894             break;
0895         err = proto_to_npindex(npi.protocol);
0896         if (err < 0)
0897             break;
0898         i = err;
0899         if (cmd == PPPIOCGNPMODE) {
0900             err = -EFAULT;
0901             npi.mode = ppp->npmode[i];
0902             if (copy_to_user(argp, &npi, sizeof(npi)))
0903                 break;
0904         } else {
0905             ppp->npmode[i] = npi.mode;
0906             /* we may be able to transmit more packets now (??) */
0907             netif_wake_queue(ppp->dev);
0908         }
0909         err = 0;
0910         break;
0911 
0912 #ifdef CONFIG_PPP_FILTER
0913     case PPPIOCSPASS:
0914     case PPPIOCSACTIVE:
0915     {
0916         struct bpf_prog *filter = ppp_get_filter(argp);
0917         struct bpf_prog **which;
0918 
0919         if (IS_ERR(filter)) {
0920             err = PTR_ERR(filter);
0921             break;
0922         }
0923         if (cmd == PPPIOCSPASS)
0924             which = &ppp->pass_filter;
0925         else
0926             which = &ppp->active_filter;
0927         ppp_lock(ppp);
0928         if (*which)
0929             bpf_prog_destroy(*which);
0930         *which = filter;
0931         ppp_unlock(ppp);
0932         err = 0;
0933         break;
0934     }
0935 #endif /* CONFIG_PPP_FILTER */
0936 
0937 #ifdef CONFIG_PPP_MULTILINK
0938     case PPPIOCSMRRU:
0939         if (get_user(val, p))
0940             break;
0941         ppp_recv_lock(ppp);
0942         ppp->mrru = val;
0943         ppp_recv_unlock(ppp);
0944         err = 0;
0945         break;
0946 #endif /* CONFIG_PPP_MULTILINK */
0947 
0948     default:
0949         err = -ENOTTY;
0950     }
0951 
0952 out:
0953     mutex_unlock(&ppp_mutex);
0954 
0955     return err;
0956 }
0957 
0958 #ifdef CONFIG_COMPAT
0959 struct ppp_option_data32 {
0960     compat_uptr_t       ptr;
0961     u32         length;
0962     compat_int_t        transmit;
0963 };
0964 #define PPPIOCSCOMPRESS32   _IOW('t', 77, struct ppp_option_data32)
0965 
0966 static long ppp_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
0967 {
0968     struct ppp_file *pf;
0969     int err = -ENOIOCTLCMD;
0970     void __user *argp = (void __user *)arg;
0971 
0972     mutex_lock(&ppp_mutex);
0973 
0974     pf = file->private_data;
0975     if (pf && pf->kind == INTERFACE) {
0976         struct ppp *ppp = PF_TO_PPP(pf);
0977         switch (cmd) {
0978 #ifdef CONFIG_PPP_FILTER
0979         case PPPIOCSPASS32:
0980         case PPPIOCSACTIVE32:
0981         {
0982             struct bpf_prog *filter = compat_ppp_get_filter(argp);
0983             struct bpf_prog **which;
0984 
0985             if (IS_ERR(filter)) {
0986                 err = PTR_ERR(filter);
0987                 break;
0988             }
0989             if (cmd == PPPIOCSPASS32)
0990                 which = &ppp->pass_filter;
0991             else
0992                 which = &ppp->active_filter;
0993             ppp_lock(ppp);
0994             if (*which)
0995                 bpf_prog_destroy(*which);
0996             *which = filter;
0997             ppp_unlock(ppp);
0998             err = 0;
0999             break;
1000         }
1001 #endif /* CONFIG_PPP_FILTER */
1002         case PPPIOCSCOMPRESS32:
1003         {
1004             struct ppp_option_data32 data32;
1005             if (copy_from_user(&data32, argp, sizeof(data32))) {
1006                 err = -EFAULT;
1007             } else {
1008                 struct ppp_option_data data = {
1009                     .ptr = compat_ptr(data32.ptr),
1010                     .length = data32.length,
1011                     .transmit = data32.transmit
1012                 };
1013                 err = ppp_set_compress(ppp, &data);
1014             }
1015             break;
1016         }
1017         }
1018     }
1019     mutex_unlock(&ppp_mutex);
1020 
1021     /* all other commands have compatible arguments */
1022     if (err == -ENOIOCTLCMD)
1023         err = ppp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1024 
1025     return err;
1026 }
1027 #endif
1028 
1029 static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
1030             struct file *file, unsigned int cmd, unsigned long arg)
1031 {
1032     int unit, err = -EFAULT;
1033     struct ppp *ppp;
1034     struct channel *chan;
1035     struct ppp_net *pn;
1036     int __user *p = (int __user *)arg;
1037 
1038     switch (cmd) {
1039     case PPPIOCNEWUNIT:
1040         /* Create a new ppp unit */
1041         if (get_user(unit, p))
1042             break;
1043         err = ppp_create_interface(net, file, &unit);
1044         if (err < 0)
1045             break;
1046 
1047         err = -EFAULT;
1048         if (put_user(unit, p))
1049             break;
1050         err = 0;
1051         break;
1052 
1053     case PPPIOCATTACH:
1054         /* Attach to an existing ppp unit */
1055         if (get_user(unit, p))
1056             break;
1057         err = -ENXIO;
1058         pn = ppp_pernet(net);
1059         mutex_lock(&pn->all_ppp_mutex);
1060         ppp = ppp_find_unit(pn, unit);
1061         if (ppp) {
1062             refcount_inc(&ppp->file.refcnt);
1063             file->private_data = &ppp->file;
1064             err = 0;
1065         }
1066         mutex_unlock(&pn->all_ppp_mutex);
1067         break;
1068 
1069     case PPPIOCATTCHAN:
1070         if (get_user(unit, p))
1071             break;
1072         err = -ENXIO;
1073         pn = ppp_pernet(net);
1074         spin_lock_bh(&pn->all_channels_lock);
1075         chan = ppp_find_channel(pn, unit);
1076         if (chan) {
1077             refcount_inc(&chan->file.refcnt);
1078             file->private_data = &chan->file;
1079             err = 0;
1080         }
1081         spin_unlock_bh(&pn->all_channels_lock);
1082         break;
1083 
1084     default:
1085         err = -ENOTTY;
1086     }
1087 
1088     return err;
1089 }
1090 
1091 static const struct file_operations ppp_device_fops = {
1092     .owner      = THIS_MODULE,
1093     .read       = ppp_read,
1094     .write      = ppp_write,
1095     .poll       = ppp_poll,
1096     .unlocked_ioctl = ppp_ioctl,
1097 #ifdef CONFIG_COMPAT
1098     .compat_ioctl   = ppp_compat_ioctl,
1099 #endif
1100     .open       = ppp_open,
1101     .release    = ppp_release,
1102     .llseek     = noop_llseek,
1103 };
1104 
1105 static __net_init int ppp_init_net(struct net *net)
1106 {
1107     struct ppp_net *pn = net_generic(net, ppp_net_id);
1108 
1109     idr_init(&pn->units_idr);
1110     mutex_init(&pn->all_ppp_mutex);
1111 
1112     INIT_LIST_HEAD(&pn->all_channels);
1113     INIT_LIST_HEAD(&pn->new_channels);
1114 
1115     spin_lock_init(&pn->all_channels_lock);
1116 
1117     return 0;
1118 }
1119 
1120 static __net_exit void ppp_exit_net(struct net *net)
1121 {
1122     struct ppp_net *pn = net_generic(net, ppp_net_id);
1123     struct net_device *dev;
1124     struct net_device *aux;
1125     struct ppp *ppp;
1126     LIST_HEAD(list);
1127     int id;
1128 
1129     rtnl_lock();
1130     for_each_netdev_safe(net, dev, aux) {
1131         if (dev->netdev_ops == &ppp_netdev_ops)
1132             unregister_netdevice_queue(dev, &list);
1133     }
1134 
1135     idr_for_each_entry(&pn->units_idr, ppp, id)
1136         /* Skip devices already unregistered by previous loop */
1137         if (!net_eq(dev_net(ppp->dev), net))
1138             unregister_netdevice_queue(ppp->dev, &list);
1139 
1140     unregister_netdevice_many(&list);
1141     rtnl_unlock();
1142 
1143     mutex_destroy(&pn->all_ppp_mutex);
1144     idr_destroy(&pn->units_idr);
1145     WARN_ON_ONCE(!list_empty(&pn->all_channels));
1146     WARN_ON_ONCE(!list_empty(&pn->new_channels));
1147 }
1148 
1149 static struct pernet_operations ppp_net_ops = {
1150     .init = ppp_init_net,
1151     .exit = ppp_exit_net,
1152     .id   = &ppp_net_id,
1153     .size = sizeof(struct ppp_net),
1154 };
1155 
1156 static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
1157 {
1158     struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
1159     int ret;
1160 
1161     mutex_lock(&pn->all_ppp_mutex);
1162 
1163     if (unit < 0) {
1164         ret = unit_get(&pn->units_idr, ppp, 0);
1165         if (ret < 0)
1166             goto err;
1167         if (!ifname_is_set) {
1168             while (1) {
1169                 snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret);
1170                 if (!netdev_name_in_use(ppp->ppp_net, ppp->dev->name))
1171                     break;
1172                 unit_put(&pn->units_idr, ret);
1173                 ret = unit_get(&pn->units_idr, ppp, ret + 1);
1174                 if (ret < 0)
1175                     goto err;
1176             }
1177         }
1178     } else {
1179         /* Caller asked for a specific unit number. Fail with -EEXIST
1180          * if unavailable. For backward compatibility, return -EEXIST
1181          * too if idr allocation fails; this makes pppd retry without
1182          * requesting a specific unit number.
1183          */
1184         if (unit_find(&pn->units_idr, unit)) {
1185             ret = -EEXIST;
1186             goto err;
1187         }
1188         ret = unit_set(&pn->units_idr, ppp, unit);
1189         if (ret < 0) {
1190             /* Rewrite error for backward compatibility */
1191             ret = -EEXIST;
1192             goto err;
1193         }
1194     }
1195     ppp->file.index = ret;
1196 
1197     if (!ifname_is_set)
1198         snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
1199 
1200     mutex_unlock(&pn->all_ppp_mutex);
1201 
1202     ret = register_netdevice(ppp->dev);
1203     if (ret < 0)
1204         goto err_unit;
1205 
1206     atomic_inc(&ppp_unit_count);
1207 
1208     return 0;
1209 
1210 err_unit:
1211     mutex_lock(&pn->all_ppp_mutex);
1212     unit_put(&pn->units_idr, ppp->file.index);
1213 err:
1214     mutex_unlock(&pn->all_ppp_mutex);
1215 
1216     return ret;
1217 }
1218 
1219 static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
1220                  const struct ppp_config *conf)
1221 {
1222     struct ppp *ppp = netdev_priv(dev);
1223     int indx;
1224     int err;
1225     int cpu;
1226 
1227     ppp->dev = dev;
1228     ppp->ppp_net = src_net;
1229     ppp->mru = PPP_MRU;
1230     ppp->owner = conf->file;
1231 
1232     init_ppp_file(&ppp->file, INTERFACE);
1233     ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
1234 
1235     for (indx = 0; indx < NUM_NP; ++indx)
1236         ppp->npmode[indx] = NPMODE_PASS;
1237     INIT_LIST_HEAD(&ppp->channels);
1238     spin_lock_init(&ppp->rlock);
1239     spin_lock_init(&ppp->wlock);
1240 
1241     ppp->xmit_recursion = alloc_percpu(int);
1242     if (!ppp->xmit_recursion) {
1243         err = -ENOMEM;
1244         goto err1;
1245     }
1246     for_each_possible_cpu(cpu)
1247         (*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;
1248 
1249 #ifdef CONFIG_PPP_MULTILINK
1250     ppp->minseq = -1;
1251     skb_queue_head_init(&ppp->mrq);
1252 #endif /* CONFIG_PPP_MULTILINK */
1253 #ifdef CONFIG_PPP_FILTER
1254     ppp->pass_filter = NULL;
1255     ppp->active_filter = NULL;
1256 #endif /* CONFIG_PPP_FILTER */
1257 
1258     err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
1259     if (err < 0)
1260         goto err2;
1261 
1262     conf->file->private_data = &ppp->file;
1263 
1264     return 0;
1265 err2:
1266     free_percpu(ppp->xmit_recursion);
1267 err1:
1268     return err;
1269 }
1270 
1271 static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
1272     [IFLA_PPP_DEV_FD]   = { .type = NLA_S32 },
1273 };
1274 
1275 static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[],
1276                struct netlink_ext_ack *extack)
1277 {
1278     if (!data)
1279         return -EINVAL;
1280 
1281     if (!data[IFLA_PPP_DEV_FD])
1282         return -EINVAL;
1283     if (nla_get_s32(data[IFLA_PPP_DEV_FD]) < 0)
1284         return -EBADF;
1285 
1286     return 0;
1287 }
1288 
1289 static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
1290               struct nlattr *tb[], struct nlattr *data[],
1291               struct netlink_ext_ack *extack)
1292 {
1293     struct ppp_config conf = {
1294         .unit = -1,
1295         .ifname_is_set = true,
1296     };
1297     struct file *file;
1298     int err;
1299 
1300     file = fget(nla_get_s32(data[IFLA_PPP_DEV_FD]));
1301     if (!file)
1302         return -EBADF;
1303 
1304     /* rtnl_lock is already held here, but ppp_create_interface() locks
1305      * ppp_mutex before holding rtnl_lock. Using mutex_trylock() avoids
1306      * possible deadlock due to lock order inversion, at the cost of
1307      * pushing the problem back to userspace.
1308      */
1309     if (!mutex_trylock(&ppp_mutex)) {
1310         err = -EBUSY;
1311         goto out;
1312     }
1313 
1314     if (file->f_op != &ppp_device_fops || file->private_data) {
1315         err = -EBADF;
1316         goto out_unlock;
1317     }
1318 
1319     conf.file = file;
1320 
1321     /* Don't use device name generated by the rtnetlink layer when ifname
1322      * isn't specified. Let ppp_dev_configure() set the device name using
1323      * the PPP unit identifier as suffix (i.e. ppp<unit_id>). This allows
1324      * userspace to infer the device name using the PPPIOCGUNIT ioctl.
1325      */
1326     if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
1327         conf.ifname_is_set = false;
1328 
1329     err = ppp_dev_configure(src_net, dev, &conf);
1330 
1331 out_unlock:
1332     mutex_unlock(&ppp_mutex);
1333 out:
1334     fput(file);
1335 
1336     return err;
1337 }
1338 
1339 static void ppp_nl_dellink(struct net_device *dev, struct list_head *head)
1340 {
1341     unregister_netdevice_queue(dev, head);
1342 }
1343 
1344 static size_t ppp_nl_get_size(const struct net_device *dev)
1345 {
1346     return 0;
1347 }
1348 
1349 static int ppp_nl_fill_info(struct sk_buff *skb, const struct net_device *dev)
1350 {
1351     return 0;
1352 }
1353 
1354 static struct net *ppp_nl_get_link_net(const struct net_device *dev)
1355 {
1356     struct ppp *ppp = netdev_priv(dev);
1357 
1358     return ppp->ppp_net;
1359 }
1360 
1361 static struct rtnl_link_ops ppp_link_ops __read_mostly = {
1362     .kind       = "ppp",
1363     .maxtype    = IFLA_PPP_MAX,
1364     .policy     = ppp_nl_policy,
1365     .priv_size  = sizeof(struct ppp),
1366     .setup      = ppp_setup,
1367     .validate   = ppp_nl_validate,
1368     .newlink    = ppp_nl_newlink,
1369     .dellink    = ppp_nl_dellink,
1370     .get_size   = ppp_nl_get_size,
1371     .fill_info  = ppp_nl_fill_info,
1372     .get_link_net   = ppp_nl_get_link_net,
1373 };
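/*
 * Note that creating a device through rtnetlink still needs a /dev/ppp
 * descriptor: userspace opens /dev/ppp and passes the fd as
 * IFLA_PPP_DEV_FD in the RTM_NEWLINK request (link kind "ppp").
 * ppp_nl_newlink() then binds that file to the new unit, so the same
 * fd carries the usual read/write/ioctl traffic afterwards.
 */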
1374 
1375 #define PPP_MAJOR   108
1376 
1377 /* Called at boot time if ppp is compiled into the kernel,
1378    or at module load time (from init_module) if compiled as a module. */
1379 static int __init ppp_init(void)
1380 {
1381     int err;
1382 
1383     pr_info("PPP generic driver version " PPP_VERSION "\n");
1384 
1385     err = register_pernet_device(&ppp_net_ops);
1386     if (err) {
1387         pr_err("failed to register PPP pernet device (%d)\n", err);
1388         goto out;
1389     }
1390 
1391     err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
1392     if (err) {
1393         pr_err("failed to register PPP device (%d)\n", err);
1394         goto out_net;
1395     }
1396 
1397     ppp_class = class_create(THIS_MODULE, "ppp");
1398     if (IS_ERR(ppp_class)) {
1399         err = PTR_ERR(ppp_class);
1400         goto out_chrdev;
1401     }
1402 
1403     err = rtnl_link_register(&ppp_link_ops);
1404     if (err) {
1405         pr_err("failed to register rtnetlink PPP handler\n");
1406         goto out_class;
1407     }
1408 
1409     /* not a big deal if we fail here :-) */
1410     device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
1411 
1412     return 0;
1413 
1414 out_class:
1415     class_destroy(ppp_class);
1416 out_chrdev:
1417     unregister_chrdev(PPP_MAJOR, "ppp");
1418 out_net:
1419     unregister_pernet_device(&ppp_net_ops);
1420 out:
1421     return err;
1422 }
1423 
1424 /*
1425  * Network interface unit routines.
1426  */
1427 static netdev_tx_t
1428 ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
1429 {
1430     struct ppp *ppp = netdev_priv(dev);
1431     int npi, proto;
1432     unsigned char *pp;
1433 
1434     npi = ethertype_to_npindex(ntohs(skb->protocol));
1435     if (npi < 0)
1436         goto outf;
1437 
1438     /* Drop, accept or reject the packet */
1439     switch (ppp->npmode[npi]) {
1440     case NPMODE_PASS:
1441         break;
1442     case NPMODE_QUEUE:
1443         /* it would be nice to have a way to tell the network
1444            system to queue this one up for later. */
1445         goto outf;
1446     case NPMODE_DROP:
1447     case NPMODE_ERROR:
1448         goto outf;
1449     }
1450 
1451     /* Put the 2-byte PPP protocol number on the front,
1452        making sure there is room for the address and control fields. */
1453     if (skb_cow_head(skb, PPP_HDRLEN))
1454         goto outf;
1455 
1456     pp = skb_push(skb, 2);
1457     proto = npindex_to_proto[npi];
1458     put_unaligned_be16(proto, pp);
1459 
1460     skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
1461     ppp_xmit_process(ppp, skb);
1462 
1463     return NETDEV_TX_OK;
1464 
1465  outf:
1466     kfree_skb(skb);
1467     ++dev->stats.tx_dropped;
1468     return NETDEV_TX_OK;
1469 }
1470 
1471 static int
1472 ppp_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
1473                void __user *addr, int cmd)
1474 {
1475     struct ppp *ppp = netdev_priv(dev);
1476     int err = -EFAULT;
1477     struct ppp_stats stats;
1478     struct ppp_comp_stats cstats;
1479     char *vers;
1480 
1481     switch (cmd) {
1482     case SIOCGPPPSTATS:
1483         ppp_get_stats(ppp, &stats);
1484         if (copy_to_user(addr, &stats, sizeof(stats)))
1485             break;
1486         err = 0;
1487         break;
1488 
1489     case SIOCGPPPCSTATS:
1490         memset(&cstats, 0, sizeof(cstats));
1491         if (ppp->xc_state)
1492             ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
1493         if (ppp->rc_state)
1494             ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
1495         if (copy_to_user(addr, &cstats, sizeof(cstats)))
1496             break;
1497         err = 0;
1498         break;
1499 
1500     case SIOCGPPPVER:
1501         vers = PPP_VERSION;
1502         if (copy_to_user(addr, vers, strlen(vers) + 1))
1503             break;
1504         err = 0;
1505         break;
1506 
1507     default:
1508         err = -EINVAL;
1509     }
1510 
1511     return err;
1512 }
1513 
1514 static void
1515 ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
1516 {
1517     struct ppp *ppp = netdev_priv(dev);
1518 
1519     ppp_recv_lock(ppp);
1520     stats64->rx_packets = ppp->stats64.rx_packets;
1521     stats64->rx_bytes   = ppp->stats64.rx_bytes;
1522     ppp_recv_unlock(ppp);
1523 
1524     ppp_xmit_lock(ppp);
1525     stats64->tx_packets = ppp->stats64.tx_packets;
1526     stats64->tx_bytes   = ppp->stats64.tx_bytes;
1527     ppp_xmit_unlock(ppp);
1528 
1529     stats64->rx_errors        = dev->stats.rx_errors;
1530     stats64->tx_errors        = dev->stats.tx_errors;
1531     stats64->rx_dropped       = dev->stats.rx_dropped;
1532     stats64->tx_dropped       = dev->stats.tx_dropped;
1533     stats64->rx_length_errors = dev->stats.rx_length_errors;
1534 }
1535 
1536 static int ppp_dev_init(struct net_device *dev)
1537 {
1538     struct ppp *ppp;
1539 
1540     netdev_lockdep_set_classes(dev);
1541 
1542     ppp = netdev_priv(dev);
1543     /* Let the netdevice take a reference on the ppp file. This ensures
1544      * that ppp_destroy_interface() won't run before the device gets
1545      * unregistered.
1546      */
1547     refcount_inc(&ppp->file.refcnt);
1548 
1549     return 0;
1550 }
1551 
1552 static void ppp_dev_uninit(struct net_device *dev)
1553 {
1554     struct ppp *ppp = netdev_priv(dev);
1555     struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
1556 
1557     ppp_lock(ppp);
1558     ppp->closing = 1;
1559     ppp_unlock(ppp);
1560 
1561     mutex_lock(&pn->all_ppp_mutex);
1562     unit_put(&pn->units_idr, ppp->file.index);
1563     mutex_unlock(&pn->all_ppp_mutex);
1564 
1565     ppp->owner = NULL;
1566 
1567     ppp->file.dead = 1;
1568     wake_up_interruptible(&ppp->file.rwait);
1569 }
1570 
1571 static void ppp_dev_priv_destructor(struct net_device *dev)
1572 {
1573     struct ppp *ppp;
1574 
1575     ppp = netdev_priv(dev);
1576     if (refcount_dec_and_test(&ppp->file.refcnt))
1577         ppp_destroy_interface(ppp);
1578 }
1579 
1580 static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
1581                  struct net_device_path *path)
1582 {
1583     struct ppp *ppp = netdev_priv(ctx->dev);
1584     struct ppp_channel *chan;
1585     struct channel *pch;
1586 
1587     if (ppp->flags & SC_MULTILINK)
1588         return -EOPNOTSUPP;
1589 
1590     if (list_empty(&ppp->channels))
1591         return -ENODEV;
1592 
1593     pch = list_first_entry(&ppp->channels, struct channel, clist);
1594     chan = pch->chan;
1595     if (!chan->ops->fill_forward_path)
1596         return -EOPNOTSUPP;
1597 
1598     return chan->ops->fill_forward_path(ctx, path, chan);
1599 }
1600 
1601 static const struct net_device_ops ppp_netdev_ops = {
1602     .ndo_init    = ppp_dev_init,
1603     .ndo_uninit      = ppp_dev_uninit,
1604     .ndo_start_xmit  = ppp_start_xmit,
1605     .ndo_siocdevprivate = ppp_net_siocdevprivate,
1606     .ndo_get_stats64 = ppp_get_stats64,
1607     .ndo_fill_forward_path = ppp_fill_forward_path,
1608 };
1609 
1610 static struct device_type ppp_type = {
1611     .name = "ppp",
1612 };
1613 
1614 static void ppp_setup(struct net_device *dev)
1615 {
1616     dev->netdev_ops = &ppp_netdev_ops;
1617     SET_NETDEV_DEVTYPE(dev, &ppp_type);
1618 
1619     dev->features |= NETIF_F_LLTX;
1620 
1621     dev->hard_header_len = PPP_HDRLEN;
1622     dev->mtu = PPP_MRU;
1623     dev->addr_len = 0;
1624     dev->tx_queue_len = 3;
1625     dev->type = ARPHRD_PPP;
1626     dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1627     dev->priv_destructor = ppp_dev_priv_destructor;
1628     netif_keep_dst(dev);
1629 }
1630 
1631 /*
1632  * Transmit-side routines.
1633  */
1634 
1635 /* Called to do any work queued up on the transmit side that can now be done */
1636 static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
1637 {
1638     ppp_xmit_lock(ppp);
1639     if (!ppp->closing) {
1640         ppp_push(ppp);
1641 
1642         if (skb)
1643             skb_queue_tail(&ppp->file.xq, skb);
1644         while (!ppp->xmit_pending &&
1645                (skb = skb_dequeue(&ppp->file.xq)))
1646             ppp_send_frame(ppp, skb);
1647         /* If there's no work left to do, tell the core net
1648            code that we can accept some more. */
1649         if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
1650             netif_wake_queue(ppp->dev);
1651         else
1652             netif_stop_queue(ppp->dev);
1653     } else {
1654         kfree_skb(skb);
1655     }
1656     ppp_xmit_unlock(ppp);
1657 }
1658 
1659 static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
1660 {
1661     local_bh_disable();
1662 
1663     if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
1664         goto err;
1665 
1666     (*this_cpu_ptr(ppp->xmit_recursion))++;
1667     __ppp_xmit_process(ppp, skb);
1668     (*this_cpu_ptr(ppp->xmit_recursion))--;
1669 
1670     local_bh_enable();
1671 
1672     return;
1673 
1674 err:
1675     local_bh_enable();
1676 
1677     kfree_skb(skb);
1678 
1679     if (net_ratelimit())
1680         netdev_err(ppp->dev, "recursion detected\n");
1681 }
1682 
1683 static inline struct sk_buff *
1684 pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1685 {
1686     struct sk_buff *new_skb;
1687     int len;
1688     int new_skb_size = ppp->dev->mtu +
1689         ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
1690     int compressor_skb_size = ppp->dev->mtu +
1691         ppp->xcomp->comp_extra + PPP_HDRLEN;
1692     new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
1693     if (!new_skb) {
1694         if (net_ratelimit())
1695             netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
1696         return NULL;
1697     }
1698     if (ppp->dev->hard_header_len > PPP_HDRLEN)
1699         skb_reserve(new_skb,
1700                 ppp->dev->hard_header_len - PPP_HDRLEN);
1701 
1702     /* compressor still expects A/C bytes in hdr */
1703     len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
1704                    new_skb->data, skb->len + 2,
1705                    compressor_skb_size);
1706     if (len > 0 && (ppp->flags & SC_CCP_UP)) {
1707         consume_skb(skb);
1708         skb = new_skb;
1709         skb_put(skb, len);
1710         skb_pull(skb, 2);   /* pull off A/C bytes */
1711     } else if (len == 0) {
1712         /* didn't compress, or CCP not up yet */
1713         consume_skb(new_skb);
1714         new_skb = skb;
1715     } else {
1716         /*
1717          * (len < 0)
1718          * MPPE requires that we do not send unencrypted
1719          * frames.  The compressor will return -1 if we
1720          * should drop the frame.  We cannot simply test
1721          * the compress_proto because MPPE and MPPC share
1722          * the same number.
1723          */
1724         if (net_ratelimit())
1725             netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
1726         kfree_skb(skb);
1727         consume_skb(new_skb);
1728         new_skb = NULL;
1729     }
1730     return new_skb;
1731 }
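/*
 * A worked example of the sizing above, with illustrative numbers
 * (MTU 1500, comp_extra 12, hard_header_len 4; PPP_HDRLEN is 4):
 *
 *   new_skb_size        = 1500 + 12 + 4 = 1516 bytes allocated
 *   compressor_skb_size = 1500 + 12 + 4 = 1516 bytes offered to compress()
 *
 * The compressor is handed skb->data - 2 so that it still sees the A/C
 * bytes; on success the final skb_pull(skb, 2) strips them again, leaving
 * the compressed frame starting at its protocol field.
 */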
1732 
1733 /*
1734  * Compress and send a frame.
1735  * The caller should have locked the xmit path,
1736  * and xmit_pending should be 0.
1737  */
1738 static void
1739 ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1740 {
1741     int proto = PPP_PROTO(skb);
1742     struct sk_buff *new_skb;
1743     int len;
1744     unsigned char *cp;
1745 
1746     if (proto < 0x8000) {
1747 #ifdef CONFIG_PPP_FILTER
1748         /* check if we should pass this packet */
1749         /* the filter instructions are constructed assuming
1750            a four-byte PPP header on each packet */
1751         *(u8 *)skb_push(skb, 2) = 1;
1752         if (ppp->pass_filter &&
1753             bpf_prog_run(ppp->pass_filter, skb) == 0) {
1754             if (ppp->debug & 1)
1755                 netdev_printk(KERN_DEBUG, ppp->dev,
1756                           "PPP: outbound frame "
1757                           "not passed\n");
1758             kfree_skb(skb);
1759             return;
1760         }
1761         /* if this packet passes the active filter, record the time */
1762         if (!(ppp->active_filter &&
1763               bpf_prog_run(ppp->active_filter, skb) == 0))
1764             ppp->last_xmit = jiffies;
1765         skb_pull(skb, 2);
1766 #else
1767         /* for data packets, record the time */
1768         ppp->last_xmit = jiffies;
1769 #endif /* CONFIG_PPP_FILTER */
1770     }
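    /*
     * The two bytes pushed above stand in for the address/control bytes,
     * giving the BPF programs the four-byte PPP header they were compiled
     * against; the first byte doubles as a direction flag (1 = outbound
     * here, 0 = inbound in ppp_receive_nonmp_frame()), so filters can
     * match on direction.
     */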
1771 
1772     ++ppp->stats64.tx_packets;
1773     ppp->stats64.tx_bytes += skb->len - PPP_PROTO_LEN;
1774 
1775     switch (proto) {
1776     case PPP_IP:
1777         if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
1778             break;
1779         /* try to do VJ TCP header compression */
1780         new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
1781                     GFP_ATOMIC);
1782         if (!new_skb) {
1783             netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
1784             goto drop;
1785         }
1786         skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
1787         cp = skb->data + 2;
1788         len = slhc_compress(ppp->vj, cp, skb->len - 2,
1789                     new_skb->data + 2, &cp,
1790                     !(ppp->flags & SC_NO_TCP_CCID));
1791         if (cp == skb->data + 2) {
1792             /* didn't compress */
1793             consume_skb(new_skb);
1794         } else {
1795             if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
1796                 proto = PPP_VJC_COMP;
1797                 cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
1798             } else {
1799                 proto = PPP_VJC_UNCOMP;
1800                 cp[0] = skb->data[2];
1801             }
1802             consume_skb(skb);
1803             skb = new_skb;
1804             cp = skb_put(skb, len + 2);
1805             cp[0] = 0;
1806             cp[1] = proto;
1807         }
1808         break;
1809 
1810     case PPP_CCP:
1811         /* peek at outbound CCP frames */
1812         ppp_ccp_peek(ppp, skb, 0);
1813         break;
1814     }
1815 
1816     /* try to do packet compression */
1817     if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
1818         proto != PPP_LCP && proto != PPP_CCP) {
1819         if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
1820             if (net_ratelimit())
1821                 netdev_err(ppp->dev,
1822                        "ppp: compression required but "
1823                        "down - pkt dropped.\n");
1824             goto drop;
1825         }
1826         skb = pad_compress_skb(ppp, skb);
1827         if (!skb)
1828             goto drop;
1829     }
1830 
1831     /*
1832      * If we are waiting for traffic (demand dialling),
1833      * queue it up for pppd to receive.
1834      */
1835     if (ppp->flags & SC_LOOP_TRAFFIC) {
1836         if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
1837             goto drop;
1838         skb_queue_tail(&ppp->file.rq, skb);
1839         wake_up_interruptible(&ppp->file.rwait);
1840         return;
1841     }
1842 
1843     ppp->xmit_pending = skb;
1844     ppp_push(ppp);
1845     return;
1846 
1847  drop:
1848     kfree_skb(skb);
1849     ++ppp->dev->stats.tx_errors;
1850 }
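/*
 * A frame reaching ppp_push() above starts with the two protocol bytes,
 * e.g. (illustrative):
 *
 *   0x00 0x21 | IP datagram ...		proto = PPP_IP (0x0021)
 *   0x00 0x2d | VJ compressed TCP ...		proto = PPP_VJC_COMP (0x002d)
 *
 * Any address/control framing is added later by the channel itself.
 */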
1851 
1852 /*
1853  * Try to send the frame in xmit_pending.
1854  * The caller should have the xmit path locked.
1855  */
1856 static void
1857 ppp_push(struct ppp *ppp)
1858 {
1859     struct list_head *list;
1860     struct channel *pch;
1861     struct sk_buff *skb = ppp->xmit_pending;
1862 
1863     if (!skb)
1864         return;
1865 
1866     list = &ppp->channels;
1867     if (list_empty(list)) {
1868         /* nowhere to send the packet, just drop it */
1869         ppp->xmit_pending = NULL;
1870         kfree_skb(skb);
1871         return;
1872     }
1873 
1874     if ((ppp->flags & SC_MULTILINK) == 0) {
1875         /* not doing multilink: send it down the first channel */
1876         list = list->next;
1877         pch = list_entry(list, struct channel, clist);
1878 
1879         spin_lock(&pch->downl);
1880         if (pch->chan) {
1881             if (pch->chan->ops->start_xmit(pch->chan, skb))
1882                 ppp->xmit_pending = NULL;
1883         } else {
1884             /* channel got unregistered */
1885             kfree_skb(skb);
1886             ppp->xmit_pending = NULL;
1887         }
1888         spin_unlock(&pch->downl);
1889         return;
1890     }
1891 
1892 #ifdef CONFIG_PPP_MULTILINK
1893     /* Multilink: fragment the packet over as many links
1894        as can take the packet at the moment. */
1895     if (!ppp_mp_explode(ppp, skb))
1896         return;
1897 #endif /* CONFIG_PPP_MULTILINK */
1898 
1899     ppp->xmit_pending = NULL;
1900     kfree_skb(skb);
1901 }
1902 
1903 #ifdef CONFIG_PPP_MULTILINK
1904 static bool mp_protocol_compress __read_mostly = true;
1905 module_param(mp_protocol_compress, bool, 0644);
1906 MODULE_PARM_DESC(mp_protocol_compress,
1907          "compress protocol id in multilink fragments");
1908 
1909 /*
1910  * Divide a packet to be transmitted into fragments and
1911  * send them out the individual links.
1912  */
1913 static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1914 {
1915     int len, totlen;
1916     int i, bits, hdrlen, mtu;
1917     int flen;
1918     int navail, nfree, nzero;
1919     int nbigger;
1920     int totspeed;
1921     int totfree;
1922     unsigned char *p, *q;
1923     struct list_head *list;
1924     struct channel *pch;
1925     struct sk_buff *frag;
1926     struct ppp_channel *chan;
1927 
1928     totspeed = 0; /* total bitrate of the bundle */
1929     nfree = 0; /* # channels which have no packet already queued */
1930     navail = 0; /* total # of usable channels (not deregistered) */
1931     nzero = 0; /* number of channels with zero speed associated */
1932     totfree = 0; /* total # of channels available and
1933                   * having no queued packets before
1934                   * starting the fragmentation */
1935 
1936     hdrlen = (ppp->flags & SC_MP_XSHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
1937     i = 0;
1938     list_for_each_entry(pch, &ppp->channels, clist) {
1939         if (pch->chan) {
1940             pch->avail = 1;
1941             navail++;
1942             pch->speed = pch->chan->speed;
1943         } else {
1944             pch->avail = 0;
1945         }
1946         if (pch->avail) {
1947             if (skb_queue_empty(&pch->file.xq) ||
1948                 !pch->had_frag) {
1949                     if (pch->speed == 0)
1950                         nzero++;
1951                     else
1952                         totspeed += pch->speed;
1953 
1954                     pch->avail = 2;
1955                     ++nfree;
1956                     ++totfree;
1957                 }
1958             if (!pch->had_frag && i < ppp->nxchan)
1959                 ppp->nxchan = i;
1960         }
1961         ++i;
1962     }
1963     /*
1964      * Don't start sending this packet unless at least half of
1965      * the channels are free.  This gives much better TCP
1966      * performance if we have a lot of channels.
1967      */
1968     if (nfree == 0 || nfree < navail / 2)
1969         return 0; /* can't take now, leave it in xmit_pending */
1970 
1971     /* Do protocol field compression */
1972     p = skb->data;
1973     len = skb->len;
1974     if (*p == 0 && mp_protocol_compress) {
1975         ++p;
1976         --len;
1977     }
1978 
1979     totlen = len;
1980     nbigger = len % nfree;
1981 
1982     /* skip to the channel after the one we last used
1983        and start at that one */
1984     list = &ppp->channels;
1985     for (i = 0; i < ppp->nxchan; ++i) {
1986         list = list->next;
1987         if (list == &ppp->channels) {
1988             i = 0;
1989             break;
1990         }
1991     }
1992 
1993     /* create a fragment for each channel */
1994     bits = B;
1995     while (len > 0) {
1996         list = list->next;
1997         if (list == &ppp->channels) {
1998             i = 0;
1999             continue;
2000         }
2001         pch = list_entry(list, struct channel, clist);
2002         ++i;
2003         if (!pch->avail)
2004             continue;
2005 
2006         /*
2007          * Skip this channel if it has a fragment pending already and
2008          * we haven't given a fragment to all of the free channels.
2009          */
2010         if (pch->avail == 1) {
2011             if (nfree > 0)
2012                 continue;
2013         } else {
2014             pch->avail = 1;
2015         }
2016 
2017         /* check the channel's mtu and whether it is still attached. */
2018         spin_lock(&pch->downl);
2019         if (pch->chan == NULL) {
2020             /* can't use this channel, it's being deregistered */
2021             if (pch->speed == 0)
2022                 nzero--;
2023             else
2024                 totspeed -= pch->speed;
2025 
2026             spin_unlock(&pch->downl);
2027             pch->avail = 0;
2028             totlen = len;
2029             totfree--;
2030             nfree--;
2031             if (--navail == 0)
2032                 break;
2033             continue;
2034         }
2035 
2036         /*
2037          * If the channel speed is not set, divide
2038          * the packet evenly among the free channels;
2039          * otherwise divide it according to the speed
2040          * of the channel we are going to transmit on.
2041          */
2042         flen = len;
2043         if (nfree > 0) {
2044             if (pch->speed == 0) {
2045                 flen = len/nfree;
2046                 if (nbigger > 0) {
2047                     flen++;
2048                     nbigger--;
2049                 }
2050             } else {
2051                 flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
2052                     ((totspeed*totfree)/pch->speed)) - hdrlen;
2053                 if (nbigger > 0) {
2054                     flen += ((totfree - nzero)*pch->speed)/totspeed;
2055                     nbigger -= ((totfree - nzero)*pch->speed)/
2056                             totspeed;
2057                 }
2058             }
2059             nfree--;
2060         }
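        /*
         * A worked example of the speed-weighted split above, with
         * illustrative numbers: totlen = 1500, hdrlen = 6, two free
         * channels (totfree = 2, nzero = 0) at 100 and 50 kbit/s
         * (totspeed = 150):
         *
         *   fast: ((2 * (1500 + 6 * 2)) / ((150 * 2) / 100)) - 6
         *       = 3024 / 3 - 6 = 1002 bytes
         *   slow: ((2 * (1500 + 6 * 2)) / ((150 * 2) / 50)) - 6
         *       = 3024 / 6 - 6 = 498 bytes
         *
         * The shares sum to totlen and are proportional to link speed,
         * with hdrlen charged once per fragment.
         */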
2061 
2062         /*
2063          * Check if we are on the last channel or
2064          * we exceeded the length of the data to
2065          * fragment.
2066          */
2067         if ((nfree <= 0) || (flen > len))
2068             flen = len;
2069         /*
2070          * It is not worth transmitting on slow channels:
2071          * in that case the flen resulting from the
2072          * above formula will be less than or equal to zero.
2073          * Skip the channel in this case.
2074          */
2075         if (flen <= 0) {
2076             pch->avail = 2;
2077             spin_unlock(&pch->downl);
2078             continue;
2079         }
2080 
2081         /*
2082          * hdrlen includes the 2-byte PPP protocol field, but the
2083          * MTU counts only the payload excluding the protocol field.
2084          * (RFC1661 Section 2)
2085          */
2086         mtu = pch->chan->mtu - (hdrlen - 2);
2087         if (mtu < 4)
2088             mtu = 4;
2089         if (flen > mtu)
2090             flen = mtu;
2091         if (flen == len)
2092             bits |= E;
2093         frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
2094         if (!frag)
2095             goto noskb;
2096         q = skb_put(frag, flen + hdrlen);
2097 
2098         /* make the MP header */
2099         put_unaligned_be16(PPP_MP, q);
2100         if (ppp->flags & SC_MP_XSHORTSEQ) {
2101             q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
2102             q[3] = ppp->nxseq;
2103         } else {
2104             q[2] = bits;
2105             q[3] = ppp->nxseq >> 16;
2106             q[4] = ppp->nxseq >> 8;
2107             q[5] = ppp->nxseq;
2108         }
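        /*
         * With long sequence numbers the header built above looks like
         * (illustrative: nxseq = 0x123456, first fragment, so B is set):
         *
         *   q[0..1] = 0x00 0x3d		PPP_MP protocol number
         *   q[2]    = 0x80			B bit (E is OR'd in on the last)
         *   q[3..5] = 0x12 0x34 0x56	24-bit sequence number
         */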
2109 
2110         memcpy(q + hdrlen, p, flen);
2111 
2112         /* try to send it down the channel */
2113         chan = pch->chan;
2114         if (!skb_queue_empty(&pch->file.xq) ||
2115             !chan->ops->start_xmit(chan, frag))
2116             skb_queue_tail(&pch->file.xq, frag);
2117         pch->had_frag = 1;
2118         p += flen;
2119         len -= flen;
2120         ++ppp->nxseq;
2121         bits = 0;
2122         spin_unlock(&pch->downl);
2123     }
2124     ppp->nxchan = i;
2125 
2126     return 1;
2127 
2128  noskb:
2129     spin_unlock(&pch->downl);
2130     if (ppp->debug & 1)
2131         netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
2132     ++ppp->dev->stats.tx_errors;
2133     ++ppp->nxseq;
2134     return 1;   /* abandon the frame */
2135 }
2136 #endif /* CONFIG_PPP_MULTILINK */
2137 
2138 /* Try to send data out on a channel */
2139 static void __ppp_channel_push(struct channel *pch)
2140 {
2141     struct sk_buff *skb;
2142     struct ppp *ppp;
2143 
2144     spin_lock(&pch->downl);
2145     if (pch->chan) {
2146         while (!skb_queue_empty(&pch->file.xq)) {
2147             skb = skb_dequeue(&pch->file.xq);
2148             if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
2149                 /* put the packet back and try again later */
2150                 skb_queue_head(&pch->file.xq, skb);
2151                 break;
2152             }
2153         }
2154     } else {
2155         /* channel got deregistered */
2156         skb_queue_purge(&pch->file.xq);
2157     }
2158     spin_unlock(&pch->downl);
2159     /* see if there is anything from the attached unit to be sent */
2160     if (skb_queue_empty(&pch->file.xq)) {
2161         ppp = pch->ppp;
2162         if (ppp)
2163             __ppp_xmit_process(ppp, NULL);
2164     }
2165 }
2166 
2167 static void ppp_channel_push(struct channel *pch)
2168 {
2169     read_lock_bh(&pch->upl);
2170     if (pch->ppp) {
2171         (*this_cpu_ptr(pch->ppp->xmit_recursion))++;
2172         __ppp_channel_push(pch);
2173         (*this_cpu_ptr(pch->ppp->xmit_recursion))--;
2174     } else {
2175         __ppp_channel_push(pch);
2176     }
2177     read_unlock_bh(&pch->upl);
2178 }
2179 
2180 /*
2181  * Receive-side routines.
2182  */
2183 
2184 struct ppp_mp_skb_parm {
2185     u32     sequence;
2186     u8      BEbits;
2187 };
2188 #define PPP_MP_CB(skb)  ((struct ppp_mp_skb_parm *)((skb)->cb))
2189 
2190 static inline void
2191 ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
2192 {
2193     ppp_recv_lock(ppp);
2194     if (!ppp->closing)
2195         ppp_receive_frame(ppp, skb, pch);
2196     else
2197         kfree_skb(skb);
2198     ppp_recv_unlock(ppp);
2199 }
2200 
2201 /**
2202  * __ppp_decompress_proto - Decompress protocol field, slim version.
2203  * @skb: Socket buffer where protocol field should be decompressed. It must have
2204  *   at least 1 byte of head room and 1 byte of linear data. First byte of
2205  *   data must be a protocol field byte.
2206  *
2207  * Decompress protocol field in PPP header if it's compressed, e.g. when
2208  * Protocol-Field-Compression (PFC) was negotiated. No checks w.r.t. skb data
2209  * length are done in this function.
2210  */
2211 static void __ppp_decompress_proto(struct sk_buff *skb)
2212 {
2213     if (skb->data[0] & 0x01)
2214         *(u8 *)skb_push(skb, 1) = 0x00;
2215 }
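/*
 * A PFC example: with Protocol-Field-Compression negotiated, an IPv4
 * frame can arrive carrying a single protocol byte:
 *
 *   on the wire:     0x21 | IP datagram ...
 *   after skb_push:  0x00 0x21 | IP datagram ...
 *
 * The first byte of an uncompressed protocol field is always even, so
 * the low-bit test above only fires for compressed fields.
 */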
2216 
2217 /**
2218  * ppp_decompress_proto - Check skb data room and decompress protocol field.
2219  * @skb: Socket buffer where protocol field should be decompressed. First byte
2220  *   of data must be a protocol field byte.
2221  *
2222  * Decompress protocol field in PPP header if it's compressed, e.g. when
2223  * Protocol-Field-Compression (PFC) was negotiated. This function also makes
2224  * sure that skb data room is sufficient for Protocol field, before and after
2225  * decompression.
2226  *
2227  * Return: true - decompressed successfully, false - not enough room in skb.
2228  */
2229 static bool ppp_decompress_proto(struct sk_buff *skb)
2230 {
2231     /* At least one byte should be present (if protocol is compressed) */
2232     if (!pskb_may_pull(skb, 1))
2233         return false;
2234 
2235     __ppp_decompress_proto(skb);
2236 
2237     /* Protocol field should occupy 2 bytes when not compressed */
2238     return pskb_may_pull(skb, 2);
2239 }
2240 
2241 /* Attempt to handle a frame via a bridged channel, if one exists.
2242  * If the channel is bridged, the frame is consumed by the bridge.
2243  * If not, the caller must handle the frame by normal recv mechanisms.
2244  * Returns true if the frame is consumed, false otherwise.
2245  */
2246 static bool ppp_channel_bridge_input(struct channel *pch, struct sk_buff *skb)
2247 {
2248     struct channel *pchb;
2249 
2250     rcu_read_lock();
2251     pchb = rcu_dereference(pch->bridge);
2252     if (!pchb)
2253         goto out_rcu;
2254 
2255     spin_lock(&pchb->downl);
2256     if (!pchb->chan) {
2257         /* channel got unregistered */
2258         kfree_skb(skb);
2259         goto outl;
2260     }
2261 
2262     skb_scrub_packet(skb, !net_eq(pch->chan_net, pchb->chan_net));
2263     if (!pchb->chan->ops->start_xmit(pchb->chan, skb))
2264         kfree_skb(skb);
2265 
2266 outl:
2267     spin_unlock(&pchb->downl);
2268 out_rcu:
2269     rcu_read_unlock();
2270 
2271     /* If pchb is set then we've consumed the packet */
2272     return !!pchb;
2273 }
2274 
2275 void
2276 ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
2277 {
2278     struct channel *pch = chan->ppp;
2279     int proto;
2280 
2281     if (!pch) {
2282         kfree_skb(skb);
2283         return;
2284     }
2285 
2286     /* If the channel is bridged, transmit via the bridge */
2287     if (ppp_channel_bridge_input(pch, skb))
2288         return;
2289 
2290     read_lock_bh(&pch->upl);
2291     if (!ppp_decompress_proto(skb)) {
2292         kfree_skb(skb);
2293         if (pch->ppp) {
2294             ++pch->ppp->dev->stats.rx_length_errors;
2295             ppp_receive_error(pch->ppp);
2296         }
2297         goto done;
2298     }
2299 
2300     proto = PPP_PROTO(skb);
2301     if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
2302         /* put it on the channel queue */
2303         skb_queue_tail(&pch->file.rq, skb);
2304         /* drop old frames if queue too long */
2305         while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
2306                (skb = skb_dequeue(&pch->file.rq)))
2307             kfree_skb(skb);
2308         wake_up_interruptible(&pch->file.rwait);
2309     } else {
2310         ppp_do_recv(pch->ppp, skb, pch);
2311     }
2312 
2313 done:
2314     read_unlock_bh(&pch->upl);
2315 }
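/*
 * Dispatch example for the test above: proto >= 0xc000 covers link-level
 * control traffic (LCP 0xc021, PAP 0xc023, CHAP 0xc223), which pppd reads
 * from the channel's own receive queue; an IPv4 frame (0x0021) on a
 * channel attached to a unit goes through ppp_do_recv() and on into the
 * network stack.
 */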
2316 
2317 /* Put a 0-length skb in the receive queue as an error indication */
2318 void
2319 ppp_input_error(struct ppp_channel *chan, int code)
2320 {
2321     struct channel *pch = chan->ppp;
2322     struct sk_buff *skb;
2323 
2324     if (!pch)
2325         return;
2326 
2327     read_lock_bh(&pch->upl);
2328     if (pch->ppp) {
2329         skb = alloc_skb(0, GFP_ATOMIC);
2330         if (skb) {
2331             skb->len = 0;       /* probably unnecessary */
2332             skb->cb[0] = code;
2333             ppp_do_recv(pch->ppp, skb, pch);
2334         }
2335     }
2336     read_unlock_bh(&pch->upl);
2337 }
2338 
2339 /*
2340  * We come in here to process a received frame.
2341  * The receive side of the ppp unit is locked.
2342  */
2343 static void
2344 ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
2345 {
2346     /* note: a 0-length skb is used as an error indication */
2347     if (skb->len > 0) {
2348         skb_checksum_complete_unset(skb);
2349 #ifdef CONFIG_PPP_MULTILINK
2350         /* XXX do channel-level decompression here */
2351         if (PPP_PROTO(skb) == PPP_MP)
2352             ppp_receive_mp_frame(ppp, skb, pch);
2353         else
2354 #endif /* CONFIG_PPP_MULTILINK */
2355             ppp_receive_nonmp_frame(ppp, skb);
2356     } else {
2357         kfree_skb(skb);
2358         ppp_receive_error(ppp);
2359     }
2360 }
2361 
2362 static void
2363 ppp_receive_error(struct ppp *ppp)
2364 {
2365     ++ppp->dev->stats.rx_errors;
2366     if (ppp->vj)
2367         slhc_toss(ppp->vj);
2368 }
2369 
2370 static void
2371 ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
2372 {
2373     struct sk_buff *ns;
2374     int proto, len, npi;
2375 
2376     /*
2377      * Decompress the frame, if compressed.
2378      * Note that some decompressors need to see uncompressed frames
2379      * that come in as well as compressed frames.
2380      */
2381     if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
2382         (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
2383         skb = ppp_decompress_frame(ppp, skb);
2384 
2385     if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
2386         goto err;
2387 
2388     /* At this point the "Protocol" field MUST be decompressed, either in
2389      * ppp_input(), ppp_decompress_frame() or in ppp_receive_mp_frame().
2390      */
2391     proto = PPP_PROTO(skb);
2392     switch (proto) {
2393     case PPP_VJC_COMP:
2394         /* decompress VJ compressed packets */
2395         if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
2396             goto err;
2397 
2398         if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
2399             /* copy to a new sk_buff with more tailroom */
2400             ns = dev_alloc_skb(skb->len + 128);
2401             if (!ns) {
2402                 netdev_err(ppp->dev, "PPP: no memory "
2403                        "(VJ decomp)\n");
2404                 goto err;
2405             }
2406             skb_reserve(ns, 2);
2407             skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
2408             consume_skb(skb);
2409             skb = ns;
2410         } else {
2411             skb->ip_summed = CHECKSUM_NONE;
2412         }
2413 
2414         len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
2415         if (len <= 0) {
2416             netdev_printk(KERN_DEBUG, ppp->dev,
2417                       "PPP: VJ decompression error\n");
2418             goto err;
2419         }
2420         len += 2;
2421         if (len > skb->len)
2422             skb_put(skb, len - skb->len);
2423         else if (len < skb->len)
2424             skb_trim(skb, len);
2425         proto = PPP_IP;
2426         break;
2427 
2428     case PPP_VJC_UNCOMP:
2429         if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
2430             goto err;
2431 
2432         /* Until we fix the decompressor, we need to make sure
2433          * the data portion is linear.
2434          */
2435         if (!pskb_may_pull(skb, skb->len))
2436             goto err;
2437 
2438         if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
2439             netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
2440             goto err;
2441         }
2442         proto = PPP_IP;
2443         break;
2444 
2445     case PPP_CCP:
2446         ppp_ccp_peek(ppp, skb, 1);
2447         break;
2448     }
2449 
2450     ++ppp->stats64.rx_packets;
2451     ppp->stats64.rx_bytes += skb->len - 2;
2452 
2453     npi = proto_to_npindex(proto);
2454     if (npi < 0) {
2455         /* control or unknown frame - pass it to pppd */
2456         skb_queue_tail(&ppp->file.rq, skb);
2457         /* limit queue length by dropping old frames */
2458         while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
2459                (skb = skb_dequeue(&ppp->file.rq)))
2460             kfree_skb(skb);
2461         /* wake up any process polling or blocking on read */
2462         wake_up_interruptible(&ppp->file.rwait);
2463 
2464     } else {
2465         /* network protocol frame - give it to the kernel */
2466 
2467 #ifdef CONFIG_PPP_FILTER
2468         /* check if the packet passes the pass and active filters */
2469         /* the filter instructions are constructed assuming
2470            a four-byte PPP header on each packet */
2471         if (ppp->pass_filter || ppp->active_filter) {
2472             if (skb_unclone(skb, GFP_ATOMIC))
2473                 goto err;
2474 
2475             *(u8 *)skb_push(skb, 2) = 0;
2476             if (ppp->pass_filter &&
2477                 bpf_prog_run(ppp->pass_filter, skb) == 0) {
2478                 if (ppp->debug & 1)
2479                     netdev_printk(KERN_DEBUG, ppp->dev,
2480                               "PPP: inbound frame "
2481                               "not passed\n");
2482                 kfree_skb(skb);
2483                 return;
2484             }
2485             if (!(ppp->active_filter &&
2486                   bpf_prog_run(ppp->active_filter, skb) == 0))
2487                 ppp->last_recv = jiffies;
2488             __skb_pull(skb, 2);
2489         } else
2490 #endif /* CONFIG_PPP_FILTER */
2491             ppp->last_recv = jiffies;
2492 
2493         if ((ppp->dev->flags & IFF_UP) == 0 ||
2494             ppp->npmode[npi] != NPMODE_PASS) {
2495             kfree_skb(skb);
2496         } else {
2497             /* chop off protocol */
2498             skb_pull_rcsum(skb, 2);
2499             skb->dev = ppp->dev;
2500             skb->protocol = htons(npindex_to_ethertype[npi]);
2501             skb_reset_mac_header(skb);
2502             skb_scrub_packet(skb, !net_eq(ppp->ppp_net,
2503                               dev_net(ppp->dev)));
2504             netif_rx(skb);
2505         }
2506     }
2507     return;
2508 
2509  err:
2510     kfree_skb(skb);
2511     ppp_receive_error(ppp);
2512 }
2513 
2514 static struct sk_buff *
2515 ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
2516 {
2517     int proto = PPP_PROTO(skb);
2518     struct sk_buff *ns;
2519     int len;
2520 
2521     /* Until we fix all the decompressors, we need to make sure
2522      * the data portion is linear.
2523      */
2524     if (!pskb_may_pull(skb, skb->len))
2525         goto err;
2526 
2527     if (proto == PPP_COMP) {
2528         int obuff_size;
2529 
2530         switch (ppp->rcomp->compress_proto) {
2531         case CI_MPPE:
2532             obuff_size = ppp->mru + PPP_HDRLEN + 1;
2533             break;
2534         default:
2535             obuff_size = ppp->mru + PPP_HDRLEN;
2536             break;
2537         }
2538 
2539         ns = dev_alloc_skb(obuff_size);
2540         if (!ns) {
2541             netdev_err(ppp->dev, "ppp_decompress_frame: "
2542                    "no memory\n");
2543             goto err;
2544         }
2545         /* the decompressor still expects the A/C bytes in the hdr */
2546         len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
2547                 skb->len + 2, ns->data, obuff_size);
2548         if (len < 0) {
2549             /* Pass the compressed frame to pppd as an
2550                error indication. */
2551             if (len == DECOMP_FATALERROR)
2552                 ppp->rstate |= SC_DC_FERROR;
2553             kfree_skb(ns);
2554             goto err;
2555         }
2556 
2557         consume_skb(skb);
2558         skb = ns;
2559         skb_put(skb, len);
2560         skb_pull(skb, 2);   /* pull off the A/C bytes */
2561 
2562         /* Don't call __ppp_decompress_proto() here, but instead rely on
2563          * corresponding algo (mppe/bsd/deflate) to decompress it.
2564          */
2565     } else {
2566         /* Uncompressed frame - pass to decompressor so it
2567            can update its dictionary if necessary. */
2568         if (ppp->rcomp->incomp)
2569             ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
2570                        skb->len + 2);
2571     }
2572 
2573     return skb;
2574 
2575  err:
2576     ppp->rstate |= SC_DC_ERROR;
2577     ppp_receive_error(ppp);
2578     return skb;
2579 }
2580 
2581 #ifdef CONFIG_PPP_MULTILINK
2582 /*
2583  * Receive a multilink frame.
2584  * We put it on the reconstruction queue and then pull off
2585  * as many completed frames as we can.
2586  */
2587 static void
2588 ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
2589 {
2590     u32 mask, seq;
2591     struct channel *ch;
2592     int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
2593 
2594     if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
2595         goto err;       /* no good, throw it away */
2596 
2597     /* Decode sequence number and begin/end bits */
2598     if (ppp->flags & SC_MP_SHORTSEQ) {
2599         seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
2600         mask = 0xfff;
2601     } else {
2602         seq = (skb->data[3] << 16) | (skb->data[4] << 8) | skb->data[5];
2603         mask = 0xffffff;
2604     }
2605     PPP_MP_CB(skb)->BEbits = skb->data[2];
2606     skb_pull(skb, mphdrlen);    /* pull off PPP and MP headers */
2607 
2608     /*
2609      * Do protocol ID decompression on the first fragment of each packet.
2610      * We have to do that here, because ppp_receive_nonmp_frame() expects
2611      * decompressed protocol field.
2612      */
2613     if (PPP_MP_CB(skb)->BEbits & B)
2614         __ppp_decompress_proto(skb);
2615 
2616     /*
2617      * Expand sequence number to 32 bits, making it as close
2618      * as possible to ppp->minseq.
2619      */
2620     seq |= ppp->minseq & ~mask;
2621     if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
2622         seq += mask + 1;
2623     else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
2624         seq -= mask + 1;    /* should never happen */
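    /*
     * A worked example with short (12-bit) sequence numbers, mask = 0xfff:
     * if ppp->minseq = 0x1ffe and a fragment arrives with on-wire
     * seq = 0x001, then
     *
     *   seq = 0x001 | (0x1ffe & ~0xfff) = 0x1001
     *   minseq - seq = 0xffd > mask >> 1, so seq += 0x1000 -> 0x2001
     *
     * placing the fragment just after minseq instead of a whole window
     * behind it.
     */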
2625     PPP_MP_CB(skb)->sequence = seq;
2626     pch->lastseq = seq;
2627 
2628     /*
2629      * If this packet comes before the next one we were expecting,
2630      * drop it.
2631      */
2632     if (seq_before(seq, ppp->nextseq)) {
2633         kfree_skb(skb);
2634         ++ppp->dev->stats.rx_dropped;
2635         ppp_receive_error(ppp);
2636         return;
2637     }
2638 
2639     /*
2640      * Reevaluate minseq, the minimum over all channels of the
2641      * last sequence number received on each channel.  Because of
2642      * the increasing sequence number rule, we know that any fragment
2643      * before `minseq' which hasn't arrived is never going to arrive.
2644      * The list of channels can't change because we have the receive
2645      * side of the ppp unit locked.
2646      */
2647     list_for_each_entry(ch, &ppp->channels, clist) {
2648         if (seq_before(ch->lastseq, seq))
2649             seq = ch->lastseq;
2650     }
2651     if (seq_before(ppp->minseq, seq))
2652         ppp->minseq = seq;
2653 
2654     /* Put the fragment on the reconstruction queue */
2655     ppp_mp_insert(ppp, skb);
2656 
2657     /* If the queue is getting long, don't wait any longer for packets
2658        before the start of the queue. */
2659     if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
2660         struct sk_buff *mskb = skb_peek(&ppp->mrq);
2661         if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence))
2662             ppp->minseq = PPP_MP_CB(mskb)->sequence;
2663     }
2664 
2665     /* Pull completed packets off the queue and receive them. */
2666     while ((skb = ppp_mp_reconstruct(ppp))) {
2667         if (pskb_may_pull(skb, 2))
2668             ppp_receive_nonmp_frame(ppp, skb);
2669         else {
2670             ++ppp->dev->stats.rx_length_errors;
2671             kfree_skb(skb);
2672             ppp_receive_error(ppp);
2673         }
2674     }
2675 
2676     return;
2677 
2678  err:
2679     kfree_skb(skb);
2680     ppp_receive_error(ppp);
2681 }
2682 
2683 /*
2684  * Insert a fragment on the MP reconstruction queue.
2685  * The queue is ordered by increasing sequence number.
2686  */
2687 static void
2688 ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
2689 {
2690     struct sk_buff *p;
2691     struct sk_buff_head *list = &ppp->mrq;
2692     u32 seq = PPP_MP_CB(skb)->sequence;
2693 
2694     /* N.B. we don't need to lock the list lock because we have the
2695        ppp unit receive-side lock. */
2696     skb_queue_walk(list, p) {
2697         if (seq_before(seq, PPP_MP_CB(p)->sequence))
2698             break;
2699     }
2700     __skb_queue_before(list, p, skb);
2701 }
2702 
2703 /*
2704  * Reconstruct a packet from the MP fragment queue.
2705  * We go through increasing sequence numbers until we find a
2706  * complete packet, or we get to the sequence number for a fragment
2707  * which hasn't arrived but might still do so.
2708  */
2709 static struct sk_buff *
2710 ppp_mp_reconstruct(struct ppp *ppp)
2711 {
2712     u32 seq = ppp->nextseq;
2713     u32 minseq = ppp->minseq;
2714     struct sk_buff_head *list = &ppp->mrq;
2715     struct sk_buff *p, *tmp;
2716     struct sk_buff *head, *tail;
2717     struct sk_buff *skb = NULL;
2718     int lost = 0, len = 0;
2719 
2720     if (ppp->mrru == 0) /* do nothing until mrru is set */
2721         return NULL;
2722     head = __skb_peek(list);
2723     tail = NULL;
2724     skb_queue_walk_safe(list, p, tmp) {
2725     again:
2726         if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
2727             /* this can't happen; ignore the skb anyway */
2728             netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
2729                    "seq %u < %u\n",
2730                    PPP_MP_CB(p)->sequence, seq);
2731             __skb_unlink(p, list);
2732             kfree_skb(p);
2733             continue;
2734         }
2735         if (PPP_MP_CB(p)->sequence != seq) {
2736             u32 oldseq;
2737             /* Fragment `seq' is missing.  If it is after
2738                minseq, it might arrive later, so stop here. */
2739             if (seq_after(seq, minseq))
2740                 break;
2741             /* Fragment `seq' is lost, keep going. */
2742             lost = 1;
2743             oldseq = seq;
2744             seq = seq_before(minseq, PPP_MP_CB(p)->sequence) ?
2745                 minseq + 1 : PPP_MP_CB(p)->sequence;
2746 
2747             if (ppp->debug & 1)
2748                 netdev_printk(KERN_DEBUG, ppp->dev,
2749                           "lost frag %u..%u\n",
2750                           oldseq, seq-1);
2751 
2752             goto again;
2753         }
2754 
2755         /*
2756          * At this point we know that all the fragments from
2757          * ppp->nextseq to seq are either present or lost.
2758          * Also, there are no complete packets in the queue
2759          * that have no missing fragments and end before this
2760          * fragment.
2761          */
2762 
2763         /* B bit set indicates this fragment starts a packet */
2764         if (PPP_MP_CB(p)->BEbits & B) {
2765             head = p;
2766             lost = 0;
2767             len = 0;
2768         }
2769 
2770         len += p->len;
2771 
2772         /* Got a complete packet yet? */
2773         if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
2774             (PPP_MP_CB(head)->BEbits & B)) {
2775             if (len > ppp->mrru + 2) {
2776                 ++ppp->dev->stats.rx_length_errors;
2777                 netdev_printk(KERN_DEBUG, ppp->dev,
2778                           "PPP: reconstructed packet"
2779                           " is too long (%d)\n", len);
2780             } else {
2781                 tail = p;
2782                 break;
2783             }
2784             ppp->nextseq = seq + 1;
2785         }
2786 
2787         /*
2788          * If this is the ending fragment of a packet,
2789          * and we haven't found a complete valid packet yet,
2790          * we can discard up to and including this fragment.
2791          */
2792         if (PPP_MP_CB(p)->BEbits & E) {
2793             struct sk_buff *tmp2;
2794 
2795             skb_queue_reverse_walk_from_safe(list, p, tmp2) {
2796                 if (ppp->debug & 1)
2797                     netdev_printk(KERN_DEBUG, ppp->dev,
2798                               "discarding frag %u\n",
2799                               PPP_MP_CB(p)->sequence);
2800                 __skb_unlink(p, list);
2801                 kfree_skb(p);
2802             }
2803             head = skb_peek(list);
2804             if (!head)
2805                 break;
2806         }
2807         ++seq;
2808     }
2809 
2810     /* If we have a complete packet, copy it all into one skb. */
2811     if (tail != NULL) {
2812         /* If we have discarded any fragments,
2813            signal a receive error. */
2814         if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
2815             skb_queue_walk_safe(list, p, tmp) {
2816                 if (p == head)
2817                     break;
2818                 if (ppp->debug & 1)
2819                     netdev_printk(KERN_DEBUG, ppp->dev,
2820                               "discarding frag %u\n",
2821                               PPP_MP_CB(p)->sequence);
2822                 __skb_unlink(p, list);
2823                 kfree_skb(p);
2824             }
2825 
2826             if (ppp->debug & 1)
2827                 netdev_printk(KERN_DEBUG, ppp->dev,
2828                           "  missed pkts %u..%u\n",
2829                           ppp->nextseq,
2830                           PPP_MP_CB(head)->sequence-1);
2831             ++ppp->dev->stats.rx_dropped;
2832             ppp_receive_error(ppp);
2833         }
2834 
2835         skb = head;
2836         if (head != tail) {
2837             struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
2838             p = skb_queue_next(list, head);
2839             __skb_unlink(skb, list);
2840             skb_queue_walk_from_safe(list, p, tmp) {
2841                 __skb_unlink(p, list);
2842                 *fragpp = p;
2843                 p->next = NULL;
2844                 fragpp = &p->next;
2845 
2846                 skb->len += p->len;
2847                 skb->data_len += p->len;
2848                 skb->truesize += p->truesize;
2849 
2850                 if (p == tail)
2851                     break;
2852             }
2853         } else {
2854             __skb_unlink(skb, list);
2855         }
2856 
2857         ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
2858     }
2859 
2860     return skb;
2861 }
2862 #endif /* CONFIG_PPP_MULTILINK */
2863 
2864 /*
2865  * Channel interface.
2866  */
2867 
2868 /* Create a new, unattached ppp channel. */
2869 int ppp_register_channel(struct ppp_channel *chan)
2870 {
2871     return ppp_register_net_channel(current->nsproxy->net_ns, chan);
2872 }
2873 
2874 /* Create a new, unattached ppp channel for specified net. */
2875 int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
2876 {
2877     struct channel *pch;
2878     struct ppp_net *pn;
2879 
2880     pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
2881     if (!pch)
2882         return -ENOMEM;
2883 
2884     pn = ppp_pernet(net);
2885 
2886     pch->ppp = NULL;
2887     pch->chan = chan;
2888     pch->chan_net = get_net_track(net, &pch->ns_tracker, GFP_KERNEL);
2889     chan->ppp = pch;
2890     init_ppp_file(&pch->file, CHANNEL);
2891     pch->file.hdrlen = chan->hdrlen;
2892 #ifdef CONFIG_PPP_MULTILINK
2893     pch->lastseq = -1;
2894 #endif /* CONFIG_PPP_MULTILINK */
2895     init_rwsem(&pch->chan_sem);
2896     spin_lock_init(&pch->downl);
2897     rwlock_init(&pch->upl);
2898 
2899     spin_lock_bh(&pn->all_channels_lock);
2900     pch->file.index = ++pn->last_channel_index;
2901     list_add(&pch->list, &pn->new_channels);
2902     atomic_inc(&channel_count);
2903     spin_unlock_bh(&pn->all_channels_lock);
2904 
2905     return 0;
2906 }
2907 
2908 /*
2909  * Return the index of a channel.
2910  */
2911 int ppp_channel_index(struct ppp_channel *chan)
2912 {
2913     struct channel *pch = chan->ppp;
2914 
2915     if (pch)
2916         return pch->file.index;
2917     return -1;
2918 }
2919 
2920 /*
2921  * Return the PPP unit number to which a channel is connected.
2922  */
2923 int ppp_unit_number(struct ppp_channel *chan)
2924 {
2925     struct channel *pch = chan->ppp;
2926     int unit = -1;
2927 
2928     if (pch) {
2929         read_lock_bh(&pch->upl);
2930         if (pch->ppp)
2931             unit = pch->ppp->file.index;
2932         read_unlock_bh(&pch->upl);
2933     }
2934     return unit;
2935 }
2936 
2937 /*
2938  * Return the PPP device interface name of a channel.
2939  */
2940 char *ppp_dev_name(struct ppp_channel *chan)
2941 {
2942     struct channel *pch = chan->ppp;
2943     char *name = NULL;
2944 
2945     if (pch) {
2946         read_lock_bh(&pch->upl);
2947         if (pch->ppp && pch->ppp->dev)
2948             name = pch->ppp->dev->name;
2949         read_unlock_bh(&pch->upl);
2950     }
2951     return name;
2952 }
2953 
2954 
2955 /*
2956  * Disconnect a channel from the generic layer.
2957  * This must be called in process context.
2958  */
2959 void
2960 ppp_unregister_channel(struct ppp_channel *chan)
2961 {
2962     struct channel *pch = chan->ppp;
2963     struct ppp_net *pn;
2964 
2965     if (!pch)
2966         return;     /* should never happen */
2967 
2968     chan->ppp = NULL;
2969 
2970     /*
2971      * This ensures that we have returned from any calls into
2972      * the channel's start_xmit or ioctl routine before we proceed.
2973      */
2974     down_write(&pch->chan_sem);
2975     spin_lock_bh(&pch->downl);
2976     pch->chan = NULL;
2977     spin_unlock_bh(&pch->downl);
2978     up_write(&pch->chan_sem);
2979     ppp_disconnect_channel(pch);
2980 
2981     pn = ppp_pernet(pch->chan_net);
2982     spin_lock_bh(&pn->all_channels_lock);
2983     list_del(&pch->list);
2984     spin_unlock_bh(&pn->all_channels_lock);
2985 
2986     ppp_unbridge_channels(pch);
2987 
2988     pch->file.dead = 1;
2989     wake_up_interruptible(&pch->file.rwait);
2990 
2991     if (refcount_dec_and_test(&pch->file.refcnt))
2992         ppp_destroy_channel(pch);
2993 }
2994 
2995 /*
2996  * Callback from a channel when it can accept more to transmit.
2997  * This should be called at BH/softirq level, not interrupt level.
2998  */
2999 void
3000 ppp_output_wakeup(struct ppp_channel *chan)
3001 {
3002     struct channel *pch = chan->ppp;
3003 
3004     if (!pch)
3005         return;
3006     ppp_channel_push(pch);
3007 }
3008 
3009 /*
3010  * Compression control.
3011  */
3012 
3013 /* Process the PPPIOCSCOMPRESS ioctl. */
3014 static int
3015 ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data)
3016 {
3017     int err = -EFAULT;
3018     struct compressor *cp, *ocomp;
3019     void *state, *ostate;
3020     unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
3021 
3022     if (data->length > CCP_MAX_OPTION_LENGTH)
3023         goto out;
3024     if (copy_from_user(ccp_option, data->ptr, data->length))
3025         goto out;
3026 
3027     err = -EINVAL;
3028     if (data->length < 2 || ccp_option[1] < 2 || ccp_option[1] > data->length)
3029         goto out;
3030 
3031     cp = try_then_request_module(
3032         find_compressor(ccp_option[0]),
3033         "ppp-compress-%d", ccp_option[0]);
3034     if (!cp)
3035         goto out;
3036 
3037     err = -ENOBUFS;
3038     if (data->transmit) {
3039         state = cp->comp_alloc(ccp_option, data->length);
3040         if (state) {
3041             ppp_xmit_lock(ppp);
3042             ppp->xstate &= ~SC_COMP_RUN;
3043             ocomp = ppp->xcomp;
3044             ostate = ppp->xc_state;
3045             ppp->xcomp = cp;
3046             ppp->xc_state = state;
3047             ppp_xmit_unlock(ppp);
3048             if (ostate) {
3049                 ocomp->comp_free(ostate);
3050                 module_put(ocomp->owner);
3051             }
3052             err = 0;
3053         } else
3054             module_put(cp->owner);
3055 
3056     } else {
3057         state = cp->decomp_alloc(ccp_option, data->length);
3058         if (state) {
3059             ppp_recv_lock(ppp);
3060             ppp->rstate &= ~SC_DECOMP_RUN;
3061             ocomp = ppp->rcomp;
3062             ostate = ppp->rc_state;
3063             ppp->rcomp = cp;
3064             ppp->rc_state = state;
3065             ppp_recv_unlock(ppp);
3066             if (ostate) {
3067                 ocomp->decomp_free(ostate);
3068                 module_put(ocomp->owner);
3069             }
3070             err = 0;
3071         } else
3072             module_put(cp->owner);
3073     }
3074 
3075  out:
3076     return err;
3077 }
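/*
 * A minimal userspace sketch of driving the path above, assuming fd is a
 * /dev/ppp descriptor already attached to a unit.  The option block is a
 * raw CCP option; the Deflate values here are illustrative:
 */
#if 0
#include <sys/ioctl.h>
#include <linux/ppp-ioctl.h>

static int set_deflate(int fd)
{
	/* CI_DEFLATE (26), len 4, window 2^15 -> (15 - 8) << 4, method 8 */
	unsigned char opt[4] = { 26, 4, ((15 - 8) << 4) | 8, 0 };
	struct ppp_option_data data = {
		.ptr		= opt,
		.length		= sizeof(opt),
		.transmit	= 1,	/* configure the transmit side */
	};

	return ioctl(fd, PPPIOCSCOMPRESS, &data);
}
#endif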
3078 
3079 /*
3080  * Look at a CCP packet and update our state accordingly.
3081  * We assume the caller has the xmit or recv path locked.
3082  */
3083 static void
3084 ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
3085 {
3086     unsigned char *dp;
3087     int len;
3088 
3089     if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
3090         return; /* no header */
3091     dp = skb->data + 2;
3092 
3093     switch (CCP_CODE(dp)) {
3094     case CCP_CONFREQ:
3095 
3096         /* A ConfReq starts negotiation of compression
3097          * in one direction of transmission,
3098          * and hence brings it down...but which way?
3099          *
3100          * Remember:
3101          * A ConfReq indicates what the sender would like to receive
3102          */
3103         if (inbound)
3104             /* He is proposing what I should send */
3105             ppp->xstate &= ~SC_COMP_RUN;
3106         else
3107             /* I am proposing what he should send */
3108             ppp->rstate &= ~SC_DECOMP_RUN;
3109 
3110         break;
3111 
3112     case CCP_TERMREQ:
3113     case CCP_TERMACK:
3114         /*
3115          * CCP is going down, both directions of transmission
3116          */
3117         ppp->rstate &= ~SC_DECOMP_RUN;
3118         ppp->xstate &= ~SC_COMP_RUN;
3119         break;
3120 
3121     case CCP_CONFACK:
3122         if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
3123             break;
3124         len = CCP_LENGTH(dp);
3125         if (!pskb_may_pull(skb, len + 2))
3126             return;     /* too short */
3127         dp += CCP_HDRLEN;
3128         len -= CCP_HDRLEN;
3129         if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
3130             break;
3131         if (inbound) {
3132             /* we will start receiving compressed packets */
3133             if (!ppp->rc_state)
3134                 break;
3135             if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
3136                     ppp->file.index, 0, ppp->mru, ppp->debug)) {
3137                 ppp->rstate |= SC_DECOMP_RUN;
3138                 ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
3139             }
3140         } else {
3141             /* we will soon start sending compressed packets */
3142             if (!ppp->xc_state)
3143                 break;
3144             if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
3145                     ppp->file.index, 0, ppp->debug))
3146                 ppp->xstate |= SC_COMP_RUN;
3147         }
3148         break;
3149 
3150     case CCP_RESETACK:
3151         /* reset the [de]compressor */
3152         if ((ppp->flags & SC_CCP_UP) == 0)
3153             break;
3154         if (inbound) {
3155             if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
3156                 ppp->rcomp->decomp_reset(ppp->rc_state);
3157                 ppp->rstate &= ~SC_DC_ERROR;
3158             }
3159         } else {
3160             if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
3161                 ppp->xcomp->comp_reset(ppp->xc_state);
3162         }
3163         break;
3164     }
3165 }
3166 
3167 /* Free up compression resources. */
3168 static void
3169 ppp_ccp_closed(struct ppp *ppp)
3170 {
3171     void *xstate, *rstate;
3172     struct compressor *xcomp, *rcomp;
3173 
3174     ppp_lock(ppp);
3175     ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
3176     ppp->xstate = 0;
3177     xcomp = ppp->xcomp;
3178     xstate = ppp->xc_state;
3179     ppp->xc_state = NULL;
3180     ppp->rstate = 0;
3181     rcomp = ppp->rcomp;
3182     rstate = ppp->rc_state;
3183     ppp->rc_state = NULL;
3184     ppp_unlock(ppp);
3185 
3186     if (xstate) {
3187         xcomp->comp_free(xstate);
3188         module_put(xcomp->owner);
3189     }
3190     if (rstate) {
3191         rcomp->decomp_free(rstate);
3192         module_put(rcomp->owner);
3193     }
3194 }
3195 
3196 /* List of compressors. */
3197 static LIST_HEAD(compressor_list);
3198 static DEFINE_SPINLOCK(compressor_list_lock);
3199 
3200 struct compressor_entry {
3201     struct list_head list;
3202     struct compressor *comp;
3203 };
3204 
3205 static struct compressor_entry *
3206 find_comp_entry(int proto)
3207 {
3208     struct compressor_entry *ce;
3209 
3210     list_for_each_entry(ce, &compressor_list, list) {
3211         if (ce->comp->compress_proto == proto)
3212             return ce;
3213     }
3214     return NULL;
3215 }
3216 
3217 /* Register a compressor */
3218 int
3219 ppp_register_compressor(struct compressor *cp)
3220 {
3221     struct compressor_entry *ce;
3222     int ret;
3223     spin_lock(&compressor_list_lock);
3224     ret = -EEXIST;
3225     if (find_comp_entry(cp->compress_proto))
3226         goto out;
3227     ret = -ENOMEM;
3228     ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
3229     if (!ce)
3230         goto out;
3231     ret = 0;
3232     ce->comp = cp;
3233     list_add(&ce->list, &compressor_list);
3234  out:
3235     spin_unlock(&compressor_list_lock);
3236     return ret;
3237 }
3238 
3239 /* Unregister a compressor */
3240 void
3241 ppp_unregister_compressor(struct compressor *cp)
3242 {
3243     struct compressor_entry *ce;
3244 
3245     spin_lock(&compressor_list_lock);
3246     ce = find_comp_entry(cp->compress_proto);
3247     if (ce && ce->comp == cp) {
3248         list_del(&ce->list);
3249         kfree(ce);
3250     }
3251     spin_unlock(&compressor_list_lock);
3252 }
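/*
 * A sketch of how a compressor module uses this registry (it mirrors what
 * ppp_deflate and bsd_comp do; the ops are elided here):
 */
#if 0
static struct compressor my_comp = {
	.compress_proto	= CI_DEFLATE,
	/* .comp_alloc, .comp_init, .compress, .decomp_alloc, ... */
	.owner		= THIS_MODULE,
};

static int __init my_comp_init(void)
{
	return ppp_register_compressor(&my_comp);
}

static void __exit my_comp_exit(void)
{
	ppp_unregister_compressor(&my_comp);
}

module_init(my_comp_init);
module_exit(my_comp_exit);
#endif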
3253 
3254 /* Find a compressor. */
3255 static struct compressor *
3256 find_compressor(int type)
3257 {
3258     struct compressor_entry *ce;
3259     struct compressor *cp = NULL;
3260 
3261     spin_lock(&compressor_list_lock);
3262     ce = find_comp_entry(type);
3263     if (ce) {
3264         cp = ce->comp;
3265         if (!try_module_get(cp->owner))
3266             cp = NULL;
3267     }
3268     spin_unlock(&compressor_list_lock);
3269     return cp;
3270 }
3271 
3272 /*
3273  * Miscellaneous stuff.
3274  */
3275 
3276 static void
3277 ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
3278 {
3279     struct slcompress *vj = ppp->vj;
3280 
3281     memset(st, 0, sizeof(*st));
3282     st->p.ppp_ipackets = ppp->stats64.rx_packets;
3283     st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
3284     st->p.ppp_ibytes = ppp->stats64.rx_bytes;
3285     st->p.ppp_opackets = ppp->stats64.tx_packets;
3286     st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
3287     st->p.ppp_obytes = ppp->stats64.tx_bytes;
3288     if (!vj)
3289         return;
3290     st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
3291     st->vj.vjs_compressed = vj->sls_o_compressed;
3292     st->vj.vjs_searches = vj->sls_o_searches;
3293     st->vj.vjs_misses = vj->sls_o_misses;
3294     st->vj.vjs_errorin = vj->sls_i_error;
3295     st->vj.vjs_tossed = vj->sls_i_tossed;
3296     st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
3297     st->vj.vjs_compressedin = vj->sls_i_compressed;
3298 }
3299 
3300 /*
3301  * Stuff for handling the lists of ppp units and channels
3302  * and for initialization.
3303  */
3304 
3305 /*
3306  * Create a new ppp interface unit.  Fails if it can't allocate memory
3307  * or if there is already a unit with the requested number.
3308  * unit == -1 means allocate a new number.
3309  */
3310 static int ppp_create_interface(struct net *net, struct file *file, int *unit)
3311 {
3312     struct ppp_config conf = {
3313         .file = file,
3314         .unit = *unit,
3315         .ifname_is_set = false,
3316     };
3317     struct net_device *dev;
3318     struct ppp *ppp;
3319     int err;
3320 
3321     dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_ENUM, ppp_setup);
3322     if (!dev) {
3323         err = -ENOMEM;
3324         goto err;
3325     }
3326     dev_net_set(dev, net);
3327     dev->rtnl_link_ops = &ppp_link_ops;
3328 
3329     rtnl_lock();
3330 
3331     err = ppp_dev_configure(net, dev, &conf);
3332     if (err < 0)
3333         goto err_dev;
3334     ppp = netdev_priv(dev);
3335     *unit = ppp->file.index;
3336 
3337     rtnl_unlock();
3338 
3339     return 0;
3340 
3341 err_dev:
3342     rtnl_unlock();
3343     free_netdev(dev);
3344 err:
3345     return err;
3346 }
3347 
3348 /*
3349  * Initialize a ppp_file structure.
3350  */
3351 static void
3352 init_ppp_file(struct ppp_file *pf, int kind)
3353 {
3354     pf->kind = kind;
3355     skb_queue_head_init(&pf->xq);
3356     skb_queue_head_init(&pf->rq);
3357     refcount_set(&pf->refcnt, 1);
3358     init_waitqueue_head(&pf->rwait);
3359 }
3360 
3361 /*
3362  * Free the memory used by a ppp unit.  This is only called once
3363  * there are no channels connected to the unit and no file structs
3364  * that reference the unit.
3365  */
3366 static void ppp_destroy_interface(struct ppp *ppp)
3367 {
3368     atomic_dec(&ppp_unit_count);
3369 
3370     if (!ppp->file.dead || ppp->n_channels) {
3371         /* "can't happen" */
3372         netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
3373                "but dead=%d n_channels=%d !\n",
3374                ppp, ppp->file.dead, ppp->n_channels);
3375         return;
3376     }
3377 
3378     ppp_ccp_closed(ppp);
3379     if (ppp->vj) {
3380         slhc_free(ppp->vj);
3381         ppp->vj = NULL;
3382     }
3383     skb_queue_purge(&ppp->file.xq);
3384     skb_queue_purge(&ppp->file.rq);
3385 #ifdef CONFIG_PPP_MULTILINK
3386     skb_queue_purge(&ppp->mrq);
3387 #endif /* CONFIG_PPP_MULTILINK */
3388 #ifdef CONFIG_PPP_FILTER
3389     if (ppp->pass_filter) {
3390         bpf_prog_destroy(ppp->pass_filter);
3391         ppp->pass_filter = NULL;
3392     }
3393 
3394     if (ppp->active_filter) {
3395         bpf_prog_destroy(ppp->active_filter);
3396         ppp->active_filter = NULL;
3397     }
3398 #endif /* CONFIG_PPP_FILTER */
3399 
3400     kfree_skb(ppp->xmit_pending);
3401     free_percpu(ppp->xmit_recursion);
3402 
3403     free_netdev(ppp->dev);
3404 }
3405 
3406 /*
3407  * Locate an existing ppp unit.
3408  * The caller should have locked the all_ppp_mutex.
3409  */
3410 static struct ppp *
3411 ppp_find_unit(struct ppp_net *pn, int unit)
3412 {
3413     return unit_find(&pn->units_idr, unit);
3414 }
3415 
3416 /*
3417  * Locate an existing ppp channel.
3418  * The caller should have locked the all_channels_lock.
3419  * First we look in the new_channels list, then in the
3420  * all_channels list.  If found in the new_channels list,
3421  * we move it to the all_channels list.  This is for speed
3422  * when we have a lot of channels in use.
3423  */
3424 static struct channel *
3425 ppp_find_channel(struct ppp_net *pn, int unit)
3426 {
3427     struct channel *pch;
3428 
3429     list_for_each_entry(pch, &pn->new_channels, list) {
3430         if (pch->file.index == unit) {
3431             list_move(&pch->list, &pn->all_channels);
3432             return pch;
3433         }
3434     }
3435 
3436     list_for_each_entry(pch, &pn->all_channels, list) {
3437         if (pch->file.index == unit)
3438             return pch;
3439     }
3440 
3441     return NULL;
3442 }
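/*
 * Lifecycle sketch: ppp_register_channel() (earlier in this file) adds
 * a freshly registered channel to pn->new_channels, so the first
 * successful lookup migrates it:
 *
 *     ppp_register_channel(chan);          // lands on pn->new_channels
 *     pch = ppp_find_channel(pn, index);   // found, moved to all_channels
 */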
3443 
3444 /*
3445  * Connect a PPP channel to a PPP interface unit.
3446  */
3447 static int
3448 ppp_connect_channel(struct channel *pch, int unit)
3449 {
3450     struct ppp *ppp;
3451     struct ppp_net *pn;
3452     int ret = -ENXIO;
3453     int hdrlen;
3454 
3455     pn = ppp_pernet(pch->chan_net);
3456 
3457     mutex_lock(&pn->all_ppp_mutex);
3458     ppp = ppp_find_unit(pn, unit);
3459     if (!ppp)
3460         goto out;
3461     write_lock_bh(&pch->upl);
3462     ret = -EINVAL;
3463     if (pch->ppp ||
3464         rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl)))
3465         goto outl;
3466 
3467     ppp_lock(ppp);
3468     spin_lock_bh(&pch->downl);
3469     if (!pch->chan) {
3470         /* Don't connect unregistered channels */
3471         spin_unlock_bh(&pch->downl);
3472         ppp_unlock(ppp);
3473         ret = -ENOTCONN;
3474         goto outl;
3475     }
3476     spin_unlock_bh(&pch->downl);
3477     if (pch->file.hdrlen > ppp->file.hdrlen)
3478         ppp->file.hdrlen = pch->file.hdrlen;
3479     hdrlen = pch->file.hdrlen + PPP_PROTO_LEN;  /* for protocol bytes */
3480     if (hdrlen > ppp->dev->hard_header_len)
3481         ppp->dev->hard_header_len = hdrlen;
3482     list_add_tail(&pch->clist, &ppp->channels);
3483     ++ppp->n_channels;
3484     pch->ppp = ppp;
3485     refcount_inc(&ppp->file.refcnt);
3486     ppp_unlock(ppp);
3487     ret = 0;
3488 
3489  outl:
3490     write_unlock_bh(&pch->upl);
3491  out:
3492     mutex_unlock(&pn->all_ppp_mutex);
3493     return ret;
3494 }
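/*
 * User-space sketch (illustrative names): pppd attaches an open channel
 * file descriptor, e.g. a tty bound to a PPP line discipline or a PPPoX
 * socket, to a unit with PPPIOCCONNECT; "unit" is the index obtained
 * from PPPIOCNEWUNIT:
 *
 *     if (ioctl(chan_fd, PPPIOCCONNECT, &unit) < 0)
 *         perror("PPPIOCCONNECT");         // ENXIO: no such unit
 */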
3495 
3496 /*
3497  * Disconnect a channel from its ppp unit.
3498  */
3499 static int
3500 ppp_disconnect_channel(struct channel *pch)
3501 {
3502     struct ppp *ppp;
3503     int err = -EINVAL;
3504 
3505     write_lock_bh(&pch->upl);
3506     ppp = pch->ppp;
3507     pch->ppp = NULL;
3508     write_unlock_bh(&pch->upl);
3509     if (ppp) {
3510         /* remove it from the ppp unit's list */
3511         ppp_lock(ppp);
3512         list_del(&pch->clist);
3513         if (--ppp->n_channels == 0)
3514             wake_up_interruptible(&ppp->file.rwait);
3515         ppp_unlock(ppp);
3516         if (refcount_dec_and_test(&ppp->file.refcnt))
3517             ppp_destroy_interface(ppp);
3518         err = 0;
3519     }
3520     return err;
3521 }
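/*
 * The inverse user-space operation is PPPIOCDISCONN on the channel fd;
 * ppp_unregister_channel() also disconnects a channel that goes away:
 *
 *     ioctl(chan_fd, PPPIOCDISCONN, 0);    // detach channel from its unit
 */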
3522 
3523 /*
3524  * Free up the resources used by a ppp channel.
3525  */
3526 static void ppp_destroy_channel(struct channel *pch)
3527 {
3528     put_net_track(pch->chan_net, &pch->ns_tracker);
3529     pch->chan_net = NULL;
3530 
3531     atomic_dec(&channel_count);
3532 
3533     if (!pch->file.dead) {
3534         /* "can't happen" */
3535         pr_err("ppp: destroying undead channel %p !\n", pch);
3536         return;
3537     }
3538     skb_queue_purge(&pch->file.xq);
3539     skb_queue_purge(&pch->file.rq);
3540     kfree(pch);
3541 }
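/*
 * Note: the put_net_track() above pairs with the get_net_track() taken
 * when the channel was registered, releasing the netns reference with
 * reference tracking to help catch leaks.
 */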
3542 
3543 static void __exit ppp_cleanup(void)
3544 {
3545     /* should never happen */
3546     if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
3547         pr_err("PPP: removing module but units remain!\n");
3548     rtnl_link_unregister(&ppp_link_ops);
3549     unregister_chrdev(PPP_MAJOR, "ppp");
3550     device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
3551     class_destroy(ppp_class);
3552     unregister_pernet_device(&ppp_net_ops);
3553 }
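/*
 * Teardown note: this undoes the registrations made by ppp_init()
 * earlier in this file: the rtnl link ops, the /dev/ppp chrdev, the
 * device and class, and the pernet ops.
 */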
3554 
3555 /*
3556  * Units handling. Caller must protect concurrent access
3557  * by holding all_ppp_mutex.
3558  */
3559 
3560 /* associate a pointer with the specified unit number */
3561 static int unit_set(struct idr *p, void *ptr, int n)
3562 {
3563     int unit;
3564 
3565     unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL);
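    /* idr_alloc() returns -ENOSPC when slot n is already taken; map it
     * to -EINVAL so callers see "requested unit already in use".
     */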
3566     if (unit == -ENOSPC)
3567         unit = -EINVAL;
3568     return unit;
3569 }
3570 
3571 /* get a new free unit number and associate the pointer with it */
3572 static int unit_get(struct idr *p, void *ptr, int min)
3573 {
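    /* end == 0 means "no upper bound": take the lowest free ID >= min */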
3574     return idr_alloc(p, ptr, min, 0, GFP_KERNEL);
3575 }
3576 
3577 /* put the unit number back into the pool */
3578 static void unit_put(struct idr *p, int n)
3579 {
3580     idr_remove(p, n);
3581 }
3582 
3583 /* get the pointer associated with a unit number */
3584 static void *unit_find(struct idr *p, int n)
3585 {
3586     return idr_find(p, n);
3587 }
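/*
 * Selection sketch (hedged; see ppp_unit_register() earlier in this
 * file): a negative requested unit means "any free number", otherwise
 * the exact number is claimed:
 *
 *     if (unit < 0)
 *         ret = unit_get(&pn->units_idr, ppp, 0);
 *     else
 *         ret = unit_set(&pn->units_idr, ppp, unit);
 */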
3588 
3589 /* Module/initialization stuff */
3590 
3591 module_init(ppp_init);
3592 module_exit(ppp_cleanup);
3593 
3594 EXPORT_SYMBOL(ppp_register_net_channel);
3595 EXPORT_SYMBOL(ppp_register_channel);
3596 EXPORT_SYMBOL(ppp_unregister_channel);
3597 EXPORT_SYMBOL(ppp_channel_index);
3598 EXPORT_SYMBOL(ppp_unit_number);
3599 EXPORT_SYMBOL(ppp_dev_name);
3600 EXPORT_SYMBOL(ppp_input);
3601 EXPORT_SYMBOL(ppp_input_error);
3602 EXPORT_SYMBOL(ppp_output_wakeup);
3603 EXPORT_SYMBOL(ppp_register_compressor);
3604 EXPORT_SYMBOL(ppp_unregister_compressor);
3605 MODULE_LICENSE("GPL");
3606 MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
3607 MODULE_ALIAS_RTNL_LINK("ppp");
3608 MODULE_ALIAS("devname:ppp");