// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PPP synchronous tty channel driver for Linux.
 *
 * This is a ppp channel driver that can be used with tty device drivers
 * that are frame oriented, such as synchronous HDLC devices.
 *
 * Complete PPP frames without encoding/decoding are exchanged between
 * the channel driver and the device driver.
 *
 * The async map IOCTL codes are implemented to keep the user mode
 * applications happy if they call them. Synchronous PPP does not use
 * the async maps.
 *
 * Copyright 1999 Paul Mackerras.
 *
 * Also touched by the grubby hands of Paul Fulghum paulkf@microgate.com
 *
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames over sync serial lines.  It relies on
 * the generic PPP layer to give it frames to send and to process
 * received frames.  It implements the PPP line discipline.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20040616==
 */
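
/*
 * Illustrative only, not part of the driver: a rough sketch of how a
 * pppd-like userspace daemon is assumed to drive this line discipline.
 * It switches an already-configured synchronous tty to N_SYNC_PPP
 * (which ends up in ppp_sync_open() below) and then asks for the
 * channel index, which it would pass to PPPIOCATTCHAN on a /dev/ppp
 * descriptor.  Header choices below are assumptions, not requirements
 * imposed by this file.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/ppp-ioctl.h>	// PPPIOCGCHAN
 *	#include <linux/tty.h>		// N_SYNC_PPP (ldisc number)
 *
 *	int ldisc = N_SYNC_PPP;
 *	int chindex;
 *
 *	if (ioctl(tty_fd, TIOCSETD, &ldisc) < 0)	// attach the ldisc
 *		return -1;
 *	if (ioctl(tty_fd, PPPIOCGCHAN, &chindex) < 0)	// ppp_synctty_ioctl()
 *		return -1;
 *	// chindex identifies this channel to the generic PPP layer.
 */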

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <asm/unaligned.h>
#include <linux/uaccess.h>

#define PPP_VERSION "2.4.2"

/* Structure for storing local state. */
struct syncppp {
	struct tty_struct *tty;
	unsigned int	flags;
	unsigned int	rbits;
	int		mru;
	spinlock_t	xmit_lock;
	spinlock_t	recv_lock;
	unsigned long	xmit_flags;
	u32		xaccm[8];
	u32		raccm;
	unsigned int	bytes_sent;
	unsigned int	bytes_rcvd;

	struct sk_buff	*tpkt;
	unsigned long	last_xmit;

	struct sk_buff_head rqueue;

	struct tasklet_struct tsk;

	refcount_t	refcnt;
	struct completion dead_cmp;
	struct ppp_channel chan;	/* interface to generic ppp layer */
};

/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP	0
#define XMIT_FULL	1

/* Bits in rbits */
#define SC_RCV_BITS	(SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)

#define PPPSYNC_MAX_RQLEN	32	/* arbitrary */

/*
 * Prototypes.
 */
static struct sk_buff* ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *);
static int ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd,
			  unsigned long arg);
static void ppp_sync_process(struct tasklet_struct *t);
static int ppp_sync_push(struct syncppp *ap);
static void ppp_sync_flush_output(struct syncppp *ap);
static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
			   const char *flags, int count);

static const struct ppp_channel_ops sync_ops = {
	.start_xmit = ppp_sync_send,
	.ioctl      = ppp_sync_ioctl,
};

/*
 * Utility procedure to print a buffer in hex/ascii
 */
static void
ppp_print_buffer (const char *name, const __u8 *buf, int count)
{
	if (name != NULL)
		printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);

	print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, count);
}


/*
 * Routines implementing the synchronous PPP line discipline.
 */

/*
 * We have a potential race on dereferencing tty->disc_data,
 * because the tty layer provides no locking at all - thus one
 * cpu could be running ppp_synctty_receive while another
 * calls ppp_synctty_close, which zeroes tty->disc_data and
 * frees the memory that ppp_synctty_receive is using.  The best
 * way to fix this is to use a rwlock in the tty struct, but for now
 * we use a single global rwlock for all ttys in ppp line discipline.
 *
 * FIXME: Fixed in tty_io nowadays.
 */
static DEFINE_RWLOCK(disc_data_lock);

static struct syncppp *sp_get(struct tty_struct *tty)
{
	struct syncppp *ap;

	read_lock(&disc_data_lock);
	ap = tty->disc_data;
	if (ap != NULL)
		refcount_inc(&ap->refcnt);
	read_unlock(&disc_data_lock);
	return ap;
}

static void sp_put(struct syncppp *ap)
{
	if (refcount_dec_and_test(&ap->refcnt))
		complete(&ap->dead_cmp);
}
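
/*
 * Pattern used by the ldisc callbacks below: each callback takes a
 * temporary reference with sp_get() before touching the state and drops
 * it with sp_put() when done.  ppp_sync_close() clears tty->disc_data,
 * drops the initial reference and, if callbacks are still running,
 * sleeps on dead_cmp until the final sp_put() completes it, so the
 * structure is never freed under a running callback.
 */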

/*
 * Called when a tty is put into sync-PPP line discipline.
 */
static int
ppp_sync_open(struct tty_struct *tty)
{
	struct syncppp *ap;
	int err;
	int speed;

	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	err = -ENOMEM;
	if (!ap)
		goto out;

	/* initialize the syncppp structure */
	ap->tty = tty;
	ap->mru = PPP_MRU;
	spin_lock_init(&ap->xmit_lock);
	spin_lock_init(&ap->recv_lock);
	ap->xaccm[0] = ~0U;
	ap->xaccm[3] = 0x60000000U;
	ap->raccm = ~0U;

	skb_queue_head_init(&ap->rqueue);
	tasklet_setup(&ap->tsk, ppp_sync_process);

	refcount_set(&ap->refcnt, 1);
	init_completion(&ap->dead_cmp);

	ap->chan.private = ap;
	ap->chan.ops = &sync_ops;
	ap->chan.mtu = PPP_MRU;
	ap->chan.hdrlen = 2;	/* for A/C bytes */
	speed = tty_get_baud_rate(tty);
	ap->chan.speed = speed;
	err = ppp_register_channel(&ap->chan);
	if (err)
		goto out_free;

	tty->disc_data = ap;
	tty->receive_room = 65536;
	return 0;

 out_free:
	kfree(ap);
 out:
	return err;
}

/*
 * Called when the tty is put into another line discipline
 * or it hangs up.  We have to wait for any cpu currently
 * executing in any of the other ppp_synctty_* routines to
 * finish before we can call ppp_unregister_channel and free
 * the syncppp struct.  This routine must be called from
 * process context, not interrupt or softirq context.
 */
static void
ppp_sync_close(struct tty_struct *tty)
{
	struct syncppp *ap;

	write_lock_irq(&disc_data_lock);
	ap = tty->disc_data;
	tty->disc_data = NULL;
	write_unlock_irq(&disc_data_lock);
	if (!ap)
		return;

	/*
	 * We have now ensured that nobody can start using ap from now
	 * on, but we have to wait for all existing users to finish.
	 * Note that ppp_unregister_channel ensures that no calls to
	 * our channel ops (i.e. ppp_sync_send/ioctl) are in progress
	 * by the time it returns.
	 */
	if (!refcount_dec_and_test(&ap->refcnt))
		wait_for_completion(&ap->dead_cmp);
	tasklet_kill(&ap->tsk);

	ppp_unregister_channel(&ap->chan);
	skb_queue_purge(&ap->rqueue);
	kfree_skb(ap->tpkt);
	kfree(ap);
}

/*
 * Called on tty hangup in process context.
 *
 * Wait for I/O to driver to complete and unregister PPP channel.
 * This is already done by the close routine, so just call that.
 */
static void ppp_sync_hangup(struct tty_struct *tty)
{
	ppp_sync_close(tty);
}

/*
 * Read does nothing - no data is ever available this way.
 * Pppd reads and writes packets via /dev/ppp instead.
 */
static ssize_t
ppp_sync_read(struct tty_struct *tty, struct file *file,
	      unsigned char *buf, size_t count,
	      void **cookie, unsigned long offset)
{
	return -EAGAIN;
}

/*
 * Write on the tty does nothing, the packets all come in
 * from the ppp generic stuff.
 */
static ssize_t
ppp_sync_write(struct tty_struct *tty, struct file *file,
	       const unsigned char *buf, size_t count)
{
	return -EAGAIN;
}

static int
ppp_synctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
	struct syncppp *ap = sp_get(tty);
	int __user *p = (int __user *)arg;
	int err, val;

	if (!ap)
		return -ENXIO;
	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGCHAN:
		err = -EFAULT;
		if (put_user(ppp_channel_index(&ap->chan), p))
			break;
		err = 0;
		break;

	case PPPIOCGUNIT:
		err = -EFAULT;
		if (put_user(ppp_unit_number(&ap->chan), p))
			break;
		err = 0;
		break;

	case TCFLSH:
		/* flush our buffers and the serial port's buffer */
		if (arg == TCIOFLUSH || arg == TCOFLUSH)
			ppp_sync_flush_output(ap);
		err = n_tty_ioctl_helper(tty, cmd, arg);
		break;

	case FIONREAD:
		val = 0;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	default:
		err = tty_mode_ioctl(tty, cmd, arg);
		break;
	}

	sp_put(ap);
	return err;
}

/* No kernel lock - fine */
static __poll_t
ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
	return 0;
}

/* May sleep, don't call from interrupt level or with interrupts disabled */
static void
ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
		 const char *cflags, int count)
{
	struct syncppp *ap = sp_get(tty);
	unsigned long flags;

	if (!ap)
		return;
	spin_lock_irqsave(&ap->recv_lock, flags);
	ppp_sync_input(ap, buf, cflags, count);
	spin_unlock_irqrestore(&ap->recv_lock, flags);
	if (!skb_queue_empty(&ap->rqueue))
		tasklet_schedule(&ap->tsk);
	sp_put(ap);
	tty_unthrottle(tty);
}

static void
ppp_sync_wakeup(struct tty_struct *tty)
{
	struct syncppp *ap = sp_get(tty);

	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	if (!ap)
		return;
	set_bit(XMIT_WAKEUP, &ap->xmit_flags);
	tasklet_schedule(&ap->tsk);
	sp_put(ap);
}


static struct tty_ldisc_ops ppp_sync_ldisc = {
	.owner	= THIS_MODULE,
	.num	= N_SYNC_PPP,
	.name	= "pppsync",
	.open	= ppp_sync_open,
	.close	= ppp_sync_close,
	.hangup	= ppp_sync_hangup,
	.read	= ppp_sync_read,
	.write	= ppp_sync_write,
	.ioctl	= ppp_synctty_ioctl,
	.poll	= ppp_sync_poll,
	.receive_buf = ppp_sync_receive,
	.write_wakeup = ppp_sync_wakeup,
};

static int __init
ppp_sync_init(void)
{
	int err;

	err = tty_register_ldisc(&ppp_sync_ldisc);
	if (err != 0)
		printk(KERN_ERR "PPP_sync: error %d registering line disc.\n",
		       err);
	return err;
}

/*
 * The following routines provide the PPP channel interface.
 */
static int
ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
	struct syncppp *ap = chan->private;
	int err, val;
	u32 accm[8];
	void __user *argp = (void __user *)arg;
	u32 __user *p = argp;

	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGFLAGS:
		val = ap->flags | ap->rbits;
		if (put_user(val, (int __user *) argp))
			break;
		err = 0;
		break;
	case PPPIOCSFLAGS:
		if (get_user(val, (int __user *) argp))
			break;
		ap->flags = val & ~SC_RCV_BITS;
		spin_lock_irq(&ap->recv_lock);
		ap->rbits = val & SC_RCV_BITS;
		spin_unlock_irq(&ap->recv_lock);
		err = 0;
		break;

	case PPPIOCGASYNCMAP:
		if (put_user(ap->xaccm[0], p))
			break;
		err = 0;
		break;
	case PPPIOCSASYNCMAP:
		if (get_user(ap->xaccm[0], p))
			break;
		err = 0;
		break;

	case PPPIOCGRASYNCMAP:
		if (put_user(ap->raccm, p))
			break;
		err = 0;
		break;
	case PPPIOCSRASYNCMAP:
		if (get_user(ap->raccm, p))
			break;
		err = 0;
		break;

	case PPPIOCGXASYNCMAP:
		if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
			break;
		err = 0;
		break;
	case PPPIOCSXASYNCMAP:
		if (copy_from_user(accm, argp, sizeof(accm)))
			break;
		accm[2] &= ~0x40000000U;	/* can't escape 0x5e */
		accm[3] |= 0x60000000U;		/* must escape 0x7d, 0x7e */
		memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
		err = 0;
		break;

	case PPPIOCGMRU:
		if (put_user(ap->mru, (int __user *) argp))
			break;
		err = 0;
		break;
	case PPPIOCSMRU:
		if (get_user(val, (int __user *) argp))
			break;
		if (val < PPP_MRU)
			val = PPP_MRU;
		ap->mru = val;
		err = 0;
		break;

	default:
		err = -ENOTTY;
	}
	return err;
}

/*
 * This is called at softirq level to deliver received packets
 * to the ppp_generic code, and to tell the ppp_generic code
 * if we can accept more output now.
 */
static void ppp_sync_process(struct tasklet_struct *t)
{
	struct syncppp *ap = from_tasklet(ap, t, tsk);
	struct sk_buff *skb;

	/* process received packets */
	while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
		if (skb->len == 0) {
			/* zero length buffers indicate error */
			ppp_input_error(&ap->chan, 0);
			kfree_skb(skb);
		}
		else
			ppp_input(&ap->chan, skb);
	}

	/* try to push more stuff out */
	if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_sync_push(ap))
		ppp_output_wakeup(&ap->chan);
}

/*
 * Procedures for encapsulation and framing.
 */

static struct sk_buff*
ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
{
	int proto;
	unsigned char *data;
	int islcp;

	data  = skb->data;
	proto = get_unaligned_be16(data);

	/* LCP packets with codes between 1 (configure-request)
	 * and 7 (code-reject) must be sent as though no options
	 * have been negotiated.
	 */
	islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;

	/* compress protocol field if option enabled */
	if (data[0] == 0 && (ap->flags & SC_COMP_PROT) && !islcp)
		skb_pull(skb,1);

	/* prepend address/control fields if necessary */
	if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
		if (skb_headroom(skb) < 2) {
			struct sk_buff *npkt = dev_alloc_skb(skb->len + 2);
			if (npkt == NULL) {
				kfree_skb(skb);
				return NULL;
			}
			skb_reserve(npkt,2);
			skb_copy_from_linear_data(skb,
				  skb_put(npkt, skb->len), skb->len);
			consume_skb(skb);
			skb = npkt;
		}
		skb_push(skb,2);
		skb->data[0] = PPP_ALLSTATIONS;
		skb->data[1] = PPP_UI;
	}

	ap->last_xmit = jiffies;

	if (skb && ap->flags & SC_LOG_OUTPKT)
		ppp_print_buffer ("send buffer", skb->data, skb->len);

	return skb;
}
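
/*
 * For reference, the frame that ppp_sync_txmunge() hands to the tty
 * driver looks like this (no flag bytes or FCS are added here; under
 * the "complete frames" contract in the header comment, that is
 * presumed to be left to the synchronous device driver or its
 * hardware):
 *
 *	[ 0xff ] [ 0x03 ] [ protocol (1 or 2 bytes) ] [ payload ... ]
 *
 * The 0xff/0x03 address/control pair is omitted when SC_COMP_AC is set,
 * and the protocol field is shortened to one byte (when its high byte
 * is zero) if SC_COMP_PROT is set, except for LCP packets with codes
 * 1..7, which always go out in full.
 */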

/*
 * Transmit-side routines.
 */

/*
 * Send a packet to the peer over an sync tty line.
 * Returns 1 iff the packet was accepted.
 * If the packet was not accepted, we will call ppp_output_wakeup
 * at some later time.
 */
static int
ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct syncppp *ap = chan->private;

	ppp_sync_push(ap);

	if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
		return 0;	/* already full */
	skb = ppp_sync_txmunge(ap, skb);
	if (skb != NULL)
		ap->tpkt = skb;
	else
		clear_bit(XMIT_FULL, &ap->xmit_flags);

	ppp_sync_push(ap);
	return 1;
}

/*
 * Push as much data as possible out to the tty.
 */
static int
ppp_sync_push(struct syncppp *ap)
{
	int sent, done = 0;
	struct tty_struct *tty = ap->tty;
	int tty_stuffed = 0;

	if (!spin_trylock_bh(&ap->xmit_lock))
		return 0;
	for (;;) {
		if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
			tty_stuffed = 0;
		if (!tty_stuffed && ap->tpkt) {
			set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
			sent = tty->ops->write(tty, ap->tpkt->data, ap->tpkt->len);
			if (sent < 0)
				goto flush;	/* error, e.g. loss of CD */
			if (sent < ap->tpkt->len) {
				tty_stuffed = 1;
			} else {
				consume_skb(ap->tpkt);
				ap->tpkt = NULL;
				clear_bit(XMIT_FULL, &ap->xmit_flags);
				done = 1;
			}
			continue;
		}
		/* haven't made any progress */
		spin_unlock_bh(&ap->xmit_lock);
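		/*
		 * Re-check after dropping the lock: ppp_sync_send() or
		 * ppp_sync_wakeup() may have queued work while we held
		 * xmit_lock (their own push attempt would have failed
		 * the trylock and bailed out), so if there is anything
		 * left to do we must retake the lock and loop again.
		 */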
		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
		      (!tty_stuffed && ap->tpkt)))
			break;
		if (!spin_trylock_bh(&ap->xmit_lock))
			break;
	}
	return done;

flush:
	if (ap->tpkt) {
		kfree_skb(ap->tpkt);
		ap->tpkt = NULL;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	spin_unlock_bh(&ap->xmit_lock);
	return done;
}

/*
 * Flush output from our internal buffers.
 * Called for the TCFLSH ioctl.
 */
static void
ppp_sync_flush_output(struct syncppp *ap)
{
	int done = 0;

	spin_lock_bh(&ap->xmit_lock);
	if (ap->tpkt != NULL) {
		kfree_skb(ap->tpkt);
		ap->tpkt = NULL;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	spin_unlock_bh(&ap->xmit_lock);
	if (done)
		ppp_output_wakeup(&ap->chan);
}

/*
 * Receive-side routines.
 */

/* called when the tty driver has data for us.
 *
 * Data is frame oriented: each call to ppp_sync_input is considered
 * a whole frame. If the 1st flag byte is non-zero then the whole
 * frame is considered to be in error and is tossed.
 */
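/*
 * (The flags pointer may legitimately be NULL; a frame is only dropped
 * when a flags array is supplied and its first byte is non-zero.)
 */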
static void
ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
	       const char *flags, int count)
{
	struct sk_buff *skb;
	unsigned char *p;

	if (count == 0)
		return;

	if (ap->flags & SC_LOG_INPKT)
		ppp_print_buffer ("receive buffer", buf, count);

	/* stuff the chars in the skb */
	skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
	if (!skb) {
		printk(KERN_ERR "PPPsync: no memory (input pkt)\n");
		goto err;
	}
	/* Try to get the payload 4-byte aligned */
	if (buf[0] != PPP_ALLSTATIONS)
		skb_reserve(skb, 2 + (buf[0] & 1));

	if (flags && *flags) {
		/* error flag set, ignore frame */
		goto err;
	} else if (count > skb_tailroom(skb)) {
		/* packet overflowed MRU */
		goto err;
	}

	skb_put_data(skb, buf, count);

	/* strip address/control field if present */
	p = skb->data;
	if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
		/* chop off address/control */
		if (skb->len < 3)
			goto err;
		p = skb_pull(skb, 2);
	}

	/* PPP packet length should be >= 2 bytes when protocol field is not
	 * compressed.
	 */
	if (!(p[0] & 0x01) && skb->len < 2)
		goto err;

	/* queue the frame to be processed */
	skb_queue_tail(&ap->rqueue, skb);
	return;

err:
	/* queue zero length packet as error indication */
	if (skb || (skb = dev_alloc_skb(0))) {
		skb_trim(skb, 0);
		skb_queue_tail(&ap->rqueue, skb);
	}
}

static void __exit
ppp_sync_cleanup(void)
{
	tty_unregister_ldisc(&ppp_sync_ldisc);
}

module_init(ppp_sync_init);
module_exit(ppp_sync_cleanup);
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_SYNC_PPP);