Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-1.0+
0002 /*
0003  * Device driver for Microgate SyncLink GT serial adapters.
0004  *
0005  * written by Paul Fulghum for Microgate Corporation
0006  * paulkf@microgate.com
0007  *
0008  * Microgate and SyncLink are trademarks of Microgate Corporation
0009  *
0010  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
0011  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
0012  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
0013  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
0014  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
0015  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
0016  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
0017  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
0018  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0019  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
0020  * OF THE POSSIBILITY OF SUCH DAMAGE.
0021  */
0022 
0023 /*
0024  * DEBUG OUTPUT DEFINITIONS
0025  *
0026  * uncomment lines below to enable specific types of debug output
0027  *
0028  * DBGINFO   information - most verbose output
0029  * DBGERR    serious errors
0030  * DBGBH     bottom half service routine debugging
0031  * DBGISR    interrupt service routine debugging
0032  * DBGDATA   output receive and transmit data
0033  * DBGTBUF   output transmit DMA buffers and registers
0034  * DBGRBUF   output receive DMA buffers and registers
0035  */
0036 
0037 #define DBGINFO(fmt) if (debug_level >= DEBUG_LEVEL_INFO) printk fmt
0038 #define DBGERR(fmt) if (debug_level >= DEBUG_LEVEL_ERROR) printk fmt
0039 #define DBGBH(fmt) if (debug_level >= DEBUG_LEVEL_BH) printk fmt
0040 #define DBGISR(fmt) if (debug_level >= DEBUG_LEVEL_ISR) printk fmt
0041 #define DBGDATA(info, buf, size, label) if (debug_level >= DEBUG_LEVEL_DATA) trace_block((info), (buf), (size), (label))
0042 /*#define DBGTBUF(info) dump_tbufs(info)*/
0043 /*#define DBGRBUF(info) dump_rbufs(info)*/
0044 
0045 
0046 #include <linux/module.h>
0047 #include <linux/errno.h>
0048 #include <linux/signal.h>
0049 #include <linux/sched.h>
0050 #include <linux/timer.h>
0051 #include <linux/interrupt.h>
0052 #include <linux/pci.h>
0053 #include <linux/tty.h>
0054 #include <linux/tty_flip.h>
0055 #include <linux/serial.h>
0056 #include <linux/major.h>
0057 #include <linux/string.h>
0058 #include <linux/fcntl.h>
0059 #include <linux/ptrace.h>
0060 #include <linux/ioport.h>
0061 #include <linux/mm.h>
0062 #include <linux/seq_file.h>
0063 #include <linux/slab.h>
0064 #include <linux/netdevice.h>
0065 #include <linux/vmalloc.h>
0066 #include <linux/init.h>
0067 #include <linux/delay.h>
0068 #include <linux/ioctl.h>
0069 #include <linux/termios.h>
0070 #include <linux/bitops.h>
0071 #include <linux/workqueue.h>
0072 #include <linux/hdlc.h>
0073 #include <linux/synclink.h>
0074 
0075 #include <asm/io.h>
0076 #include <asm/irq.h>
0077 #include <asm/dma.h>
0078 #include <asm/types.h>
0079 #include <linux/uaccess.h>
0080 
0081 #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE))
0082 #define SYNCLINK_GENERIC_HDLC 1
0083 #else
0084 #define SYNCLINK_GENERIC_HDLC 0
0085 #endif
0086 
0087 /*
0088  * module identification
0089  */
0090 static char *driver_name     = "SyncLink GT";
0091 static char *slgt_driver_name = "synclink_gt";
0092 static char *tty_dev_prefix  = "ttySLG";
0093 MODULE_LICENSE("GPL");
0094 #define MGSL_MAGIC 0x5401
0095 #define MAX_DEVICES 32
0096 
0097 static const struct pci_device_id pci_table[] = {
0098     {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
0099     {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT2_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
0100     {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT4_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
0101     {PCI_VENDOR_ID_MICROGATE, SYNCLINK_AC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
0102     {0,}, /* terminate list */
0103 };
0104 MODULE_DEVICE_TABLE(pci, pci_table);
0105 
0106 static int  init_one(struct pci_dev *dev,const struct pci_device_id *ent);
0107 static void remove_one(struct pci_dev *dev);
0108 static struct pci_driver pci_driver = {
0109     .name       = "synclink_gt",
0110     .id_table   = pci_table,
0111     .probe      = init_one,
0112     .remove     = remove_one,
0113 };
0114 
0115 static bool pci_registered;
0116 
0117 /*
0118  * module configuration and status
0119  */
0120 static struct slgt_info *slgt_device_list;
0121 static int slgt_device_count;
0122 
0123 static int ttymajor;
0124 static int debug_level;
0125 static int maxframe[MAX_DEVICES];
0126 
0127 module_param(ttymajor, int, 0);
0128 module_param(debug_level, int, 0);
0129 module_param_array(maxframe, int, NULL, 0);
0130 
0131 MODULE_PARM_DESC(ttymajor, "TTY major device number override: 0=auto assigned");
0132 MODULE_PARM_DESC(debug_level, "Debug syslog output: 0=disabled, 1 to 5=increasing detail");
0133 MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)");
0134 
0135 /*
0136  * tty support and callbacks
0137  */
0138 static struct tty_driver *serial_driver;
0139 
0140 static void wait_until_sent(struct tty_struct *tty, int timeout);
0141 static void flush_buffer(struct tty_struct *tty);
0142 static void tx_release(struct tty_struct *tty);
0143 
0144 /*
0145  * generic HDLC support
0146  */
0147 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
0148 
0149 
0150 /*
0151  * device specific structures, macros and functions
0152  */
0153 
0154 #define SLGT_MAX_PORTS 4
0155 #define SLGT_REG_SIZE  256
0156 
0157 /*
0158  * conditional wait facility
0159  */
0160 struct cond_wait {
0161     struct cond_wait *next;
0162     wait_queue_head_t q;
0163     wait_queue_entry_t wait;
0164     unsigned int data;
0165 };
0166 static void flush_cond_wait(struct cond_wait **head);
0167 
0168 /*
0169  * DMA buffer descriptor and access macros
0170  */
0171 struct slgt_desc
0172 {
0173     __le16 count;
0174     __le16 status;
0175     __le32 pbuf;  /* physical address of data buffer */
0176     __le32 next;  /* physical address of next descriptor */
0177 
0178     /* driver book keeping */
0179     char *buf;          /* virtual  address of data buffer */
0180         unsigned int pdesc; /* physical address of this descriptor */
0181     dma_addr_t buf_dma_addr;
0182     unsigned short buf_count;
0183 };
0184 
0185 #define set_desc_buffer(a,b) (a).pbuf = cpu_to_le32((unsigned int)(b))
0186 #define set_desc_next(a,b) (a).next   = cpu_to_le32((unsigned int)(b))
0187 #define set_desc_count(a,b)(a).count  = cpu_to_le16((unsigned short)(b))
0188 #define set_desc_eof(a,b)  (a).status = cpu_to_le16((b) ? (le16_to_cpu((a).status) | BIT0) : (le16_to_cpu((a).status) & ~BIT0))
0189 #define set_desc_status(a, b) (a).status = cpu_to_le16((unsigned short)(b))
0190 #define desc_count(a)      (le16_to_cpu((a).count))
0191 #define desc_status(a)     (le16_to_cpu((a).status))
0192 #define desc_complete(a)   (le16_to_cpu((a).status) & BIT15)
0193 #define desc_eof(a)        (le16_to_cpu((a).status) & BIT2)
0194 #define desc_crc_error(a)  (le16_to_cpu((a).status) & BIT1)
0195 #define desc_abort(a)      (le16_to_cpu((a).status) & BIT0)
0196 #define desc_residue(a)    ((le16_to_cpu((a).status) & 0x38) >> 3)
0197 
0198 struct _input_signal_events {
0199     int ri_up;
0200     int ri_down;
0201     int dsr_up;
0202     int dsr_down;
0203     int dcd_up;
0204     int dcd_down;
0205     int cts_up;
0206     int cts_down;
0207 };
0208 
0209 /*
0210  * device instance data structure
0211  */
0212 struct slgt_info {
0213     void *if_ptr;       /* General purpose pointer (used by SPPP) */
0214     struct tty_port port;
0215 
0216     struct slgt_info *next_device;  /* device list link */
0217 
0218     int magic;
0219 
0220     char device_name[25];
0221     struct pci_dev *pdev;
0222 
0223     int port_count;  /* count of ports on adapter */
0224     int adapter_num; /* adapter instance number */
0225     int port_num;    /* port instance number */
0226 
0227     /* array of pointers to port contexts on this adapter */
0228     struct slgt_info *port_array[SLGT_MAX_PORTS];
0229 
0230     int         line;       /* tty line instance number */
0231 
0232     struct mgsl_icount  icount;
0233 
0234     int         timeout;
0235     int         x_char;     /* xon/xoff character */
0236     unsigned int        read_status_mask;
0237     unsigned int        ignore_status_mask;
0238 
0239     wait_queue_head_t   status_event_wait_q;
0240     wait_queue_head_t   event_wait_q;
0241     struct timer_list   tx_timer;
0242     struct timer_list   rx_timer;
0243 
0244     unsigned int            gpio_present;
0245     struct cond_wait        *gpio_wait_q;
0246 
0247     spinlock_t lock;    /* spinlock for synchronizing with ISR */
0248 
0249     struct work_struct task;
0250     u32 pending_bh;
0251     bool bh_requested;
0252     bool bh_running;
0253 
0254     int isr_overflow;
0255     bool irq_requested; /* true if IRQ requested */
0256     bool irq_occurred;  /* for diagnostics use */
0257 
0258     /* device configuration */
0259 
0260     unsigned int bus_type;
0261     unsigned int irq_level;
0262     unsigned long irq_flags;
0263 
0264     unsigned char __iomem * reg_addr;  /* memory mapped registers address */
0265     u32 phys_reg_addr;
0266     bool reg_addr_requested;
0267 
0268     MGSL_PARAMS params;       /* communications parameters */
0269     u32 idle_mode;
0270     u32 max_frame_size;       /* as set by device config */
0271 
0272     unsigned int rbuf_fill_level;
0273     unsigned int rx_pio;
0274     unsigned int if_mode;
0275     unsigned int base_clock;
0276     unsigned int xsync;
0277     unsigned int xctrl;
0278 
0279     /* device status */
0280 
0281     bool rx_enabled;
0282     bool rx_restart;
0283 
0284     bool tx_enabled;
0285     bool tx_active;
0286 
0287     unsigned char signals;    /* serial signal states */
0288     int init_error;  /* initialization error */
0289 
0290     unsigned char *tx_buf;
0291     int tx_count;
0292 
0293     char *flag_buf;
0294     bool drop_rts_on_tx_done;
0295     struct  _input_signal_events    input_signal_events;
0296 
0297     int dcd_chkcount;   /* check counts to prevent */
0298     int cts_chkcount;   /* too many IRQs if a signal */
0299     int dsr_chkcount;   /* is floating */
0300     int ri_chkcount;
0301 
0302     char *bufs;     /* virtual address of DMA buffer lists */
0303     dma_addr_t bufs_dma_addr; /* physical address of buffer descriptors */
0304 
0305     unsigned int rbuf_count;
0306     struct slgt_desc *rbufs;
0307     unsigned int rbuf_current;
0308     unsigned int rbuf_index;
0309     unsigned int rbuf_fill_index;
0310     unsigned short rbuf_fill_count;
0311 
0312     unsigned int tbuf_count;
0313     struct slgt_desc *tbufs;
0314     unsigned int tbuf_current;
0315     unsigned int tbuf_start;
0316 
0317     unsigned char *tmp_rbuf;
0318     unsigned int tmp_rbuf_count;
0319 
0320     /* SPPP/Cisco HDLC device parts */
0321 
0322     int netcount;
0323     spinlock_t netlock;
0324 #if SYNCLINK_GENERIC_HDLC
0325     struct net_device *netdev;
0326 #endif
0327 
0328 };
0329 
0330 static MGSL_PARAMS default_params = {
0331     .mode            = MGSL_MODE_HDLC,
0332     .loopback        = 0,
0333     .flags           = HDLC_FLAG_UNDERRUN_ABORT15,
0334     .encoding        = HDLC_ENCODING_NRZI_SPACE,
0335     .clock_speed     = 0,
0336     .addr_filter     = 0xff,
0337     .crc_type        = HDLC_CRC_16_CCITT,
0338     .preamble_length = HDLC_PREAMBLE_LENGTH_8BITS,
0339     .preamble        = HDLC_PREAMBLE_PATTERN_NONE,
0340     .data_rate       = 9600,
0341     .data_bits       = 8,
0342     .stop_bits       = 1,
0343     .parity          = ASYNC_PARITY_NONE
0344 };
0345 
0346 
0347 #define BH_RECEIVE  1
0348 #define BH_TRANSMIT 2
0349 #define BH_STATUS   4
0350 #define IO_PIN_SHUTDOWN_LIMIT 100
0351 
0352 #define DMABUFSIZE 256
0353 #define DESC_LIST_SIZE 4096
0354 
0355 #define MASK_PARITY  BIT1
0356 #define MASK_FRAMING BIT0
0357 #define MASK_BREAK   BIT14
0358 #define MASK_OVERRUN BIT4
0359 
0360 #define GSR   0x00 /* global status */
0361 #define JCR   0x04 /* JTAG control */
0362 #define IODR  0x08 /* GPIO direction */
0363 #define IOER  0x0c /* GPIO interrupt enable */
0364 #define IOVR  0x10 /* GPIO value */
0365 #define IOSR  0x14 /* GPIO interrupt status */
0366 #define TDR   0x80 /* tx data */
0367 #define RDR   0x80 /* rx data */
0368 #define TCR   0x82 /* tx control */
0369 #define TIR   0x84 /* tx idle */
0370 #define TPR   0x85 /* tx preamble */
0371 #define RCR   0x86 /* rx control */
0372 #define VCR   0x88 /* V.24 control */
0373 #define CCR   0x89 /* clock control */
0374 #define BDR   0x8a /* baud divisor */
0375 #define SCR   0x8c /* serial control */
0376 #define SSR   0x8e /* serial status */
0377 #define RDCSR 0x90 /* rx DMA control/status */
0378 #define TDCSR 0x94 /* tx DMA control/status */
0379 #define RDDAR 0x98 /* rx DMA descriptor address */
0380 #define TDDAR 0x9c /* tx DMA descriptor address */
0381 #define XSR   0x40 /* extended sync pattern */
0382 #define XCR   0x44 /* extended control */
0383 
0384 #define RXIDLE      BIT14
0385 #define RXBREAK     BIT14
0386 #define IRQ_TXDATA  BIT13
0387 #define IRQ_TXIDLE  BIT12
0388 #define IRQ_TXUNDER BIT11 /* HDLC */
0389 #define IRQ_RXDATA  BIT10
0390 #define IRQ_RXIDLE  BIT9  /* HDLC */
0391 #define IRQ_RXBREAK BIT9  /* async */
0392 #define IRQ_RXOVER  BIT8
0393 #define IRQ_DSR     BIT7
0394 #define IRQ_CTS     BIT6
0395 #define IRQ_DCD     BIT5
0396 #define IRQ_RI      BIT4
0397 #define IRQ_ALL     0x3ff0
0398 #define IRQ_MASTER  BIT0
0399 
0400 #define slgt_irq_on(info, mask) \
0401     wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) | (mask)))
0402 #define slgt_irq_off(info, mask) \
0403     wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) & ~(mask)))
0404 
0405 static __u8  rd_reg8(struct slgt_info *info, unsigned int addr);
0406 static void  wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value);
0407 static __u16 rd_reg16(struct slgt_info *info, unsigned int addr);
0408 static void  wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value);
0409 static __u32 rd_reg32(struct slgt_info *info, unsigned int addr);
0410 static void  wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value);
0411 
0412 static void  msc_set_vcr(struct slgt_info *info);
0413 
0414 static int  startup(struct slgt_info *info);
0415 static int  block_til_ready(struct tty_struct *tty, struct file * filp,struct slgt_info *info);
0416 static void shutdown(struct slgt_info *info);
0417 static void program_hw(struct slgt_info *info);
0418 static void change_params(struct slgt_info *info);
0419 
0420 static int  adapter_test(struct slgt_info *info);
0421 
0422 static void reset_port(struct slgt_info *info);
0423 static void async_mode(struct slgt_info *info);
0424 static void sync_mode(struct slgt_info *info);
0425 
0426 static void rx_stop(struct slgt_info *info);
0427 static void rx_start(struct slgt_info *info);
0428 static void reset_rbufs(struct slgt_info *info);
0429 static void free_rbufs(struct slgt_info *info, unsigned int first, unsigned int last);
0430 static bool rx_get_frame(struct slgt_info *info);
0431 static bool rx_get_buf(struct slgt_info *info);
0432 
0433 static void tx_start(struct slgt_info *info);
0434 static void tx_stop(struct slgt_info *info);
0435 static void tx_set_idle(struct slgt_info *info);
0436 static unsigned int tbuf_bytes(struct slgt_info *info);
0437 static void reset_tbufs(struct slgt_info *info);
0438 static void tdma_reset(struct slgt_info *info);
0439 static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
0440 
0441 static void get_gtsignals(struct slgt_info *info);
0442 static void set_gtsignals(struct slgt_info *info);
0443 static void set_rate(struct slgt_info *info, u32 data_rate);
0444 
0445 static void bh_transmit(struct slgt_info *info);
0446 static void isr_txeom(struct slgt_info *info, unsigned short status);
0447 
0448 static void tx_timeout(struct timer_list *t);
0449 static void rx_timeout(struct timer_list *t);
0450 
0451 /*
0452  * ioctl handlers
0453  */
0454 static int  get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount);
0455 static int  get_params(struct slgt_info *info, MGSL_PARAMS __user *params);
0456 static int  set_params(struct slgt_info *info, MGSL_PARAMS __user *params);
0457 static int  get_txidle(struct slgt_info *info, int __user *idle_mode);
0458 static int  set_txidle(struct slgt_info *info, int idle_mode);
0459 static int  tx_enable(struct slgt_info *info, int enable);
0460 static int  tx_abort(struct slgt_info *info);
0461 static int  rx_enable(struct slgt_info *info, int enable);
0462 static int  modem_input_wait(struct slgt_info *info,int arg);
0463 static int  wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr);
0464 static int  get_interface(struct slgt_info *info, int __user *if_mode);
0465 static int  set_interface(struct slgt_info *info, int if_mode);
0466 static int  set_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
0467 static int  get_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
0468 static int  wait_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
0469 static int  get_xsync(struct slgt_info *info, int __user *if_mode);
0470 static int  set_xsync(struct slgt_info *info, int if_mode);
0471 static int  get_xctrl(struct slgt_info *info, int __user *if_mode);
0472 static int  set_xctrl(struct slgt_info *info, int if_mode);
0473 
0474 /*
0475  * driver functions
0476  */
0477 static void release_resources(struct slgt_info *info);
0478 
0479 /*
0480  * DEBUG OUTPUT CODE
0481  */
0482 #ifndef DBGINFO
0483 #define DBGINFO(fmt)
0484 #endif
0485 #ifndef DBGERR
0486 #define DBGERR(fmt)
0487 #endif
0488 #ifndef DBGBH
0489 #define DBGBH(fmt)
0490 #endif
0491 #ifndef DBGISR
0492 #define DBGISR(fmt)
0493 #endif
0494 
0495 #ifdef DBGDATA
0496 static void trace_block(struct slgt_info *info, const char *data, int count, const char *label)
0497 {
0498     int i;
0499     int linecount;
0500     printk("%s %s data:\n",info->device_name, label);
0501     while(count) {
0502         linecount = (count > 16) ? 16 : count;
0503         for(i=0; i < linecount; i++)
0504             printk("%02X ",(unsigned char)data[i]);
0505         for(;i<17;i++)
0506             printk("   ");
0507         for(i=0;i<linecount;i++) {
0508             if (data[i]>=040 && data[i]<=0176)
0509                 printk("%c",data[i]);
0510             else
0511                 printk(".");
0512         }
0513         printk("\n");
0514         data  += linecount;
0515         count -= linecount;
0516     }
0517 }
0518 #else
0519 #define DBGDATA(info, buf, size, label)
0520 #endif
0521 
0522 #ifdef DBGTBUF
0523 static void dump_tbufs(struct slgt_info *info)
0524 {
0525     int i;
0526     printk("tbuf_current=%d\n", info->tbuf_current);
0527     for (i=0 ; i < info->tbuf_count ; i++) {
0528         printk("%d: count=%04X status=%04X\n",
0529             i, le16_to_cpu(info->tbufs[i].count), le16_to_cpu(info->tbufs[i].status));
0530     }
0531 }
0532 #else
0533 #define DBGTBUF(info)
0534 #endif
0535 
0536 #ifdef DBGRBUF
0537 static void dump_rbufs(struct slgt_info *info)
0538 {
0539     int i;
0540     printk("rbuf_current=%d\n", info->rbuf_current);
0541     for (i=0 ; i < info->rbuf_count ; i++) {
0542         printk("%d: count=%04X status=%04X\n",
0543             i, le16_to_cpu(info->rbufs[i].count), le16_to_cpu(info->rbufs[i].status));
0544     }
0545 }
0546 #else
0547 #define DBGRBUF(info)
0548 #endif
0549 
0550 static inline int sanity_check(struct slgt_info *info, char *devname, const char *name)
0551 {
0552 #ifdef SANITY_CHECK
0553     if (!info) {
0554         printk("null struct slgt_info for (%s) in %s\n", devname, name);
0555         return 1;
0556     }
0557     if (info->magic != MGSL_MAGIC) {
0558         printk("bad magic number struct slgt_info (%s) in %s\n", devname, name);
0559         return 1;
0560     }
0561 #else
0562     if (!info)
0563         return 1;
0564 #endif
0565     return 0;
0566 }
0567 
0568 /*
0569  * line discipline callback wrappers
0570  *
0571  * The wrappers maintain line discipline references
0572  * while calling into the line discipline.
0573  *
0574  * ldisc_receive_buf  - pass receive data to line discipline
0575  */
0576 static void ldisc_receive_buf(struct tty_struct *tty,
0577                   const __u8 *data, char *flags, int count)
0578 {
0579     struct tty_ldisc *ld;
0580     if (!tty)
0581         return;
0582     ld = tty_ldisc_ref(tty);
0583     if (ld) {
0584         if (ld->ops->receive_buf)
0585             ld->ops->receive_buf(tty, data, flags, count);
0586         tty_ldisc_deref(ld);
0587     }
0588 }
0589 
0590 /* tty callbacks */
0591 
0592 static int open(struct tty_struct *tty, struct file *filp)
0593 {
0594     struct slgt_info *info;
0595     int retval, line;
0596     unsigned long flags;
0597 
0598     line = tty->index;
0599     if (line >= slgt_device_count) {
0600         DBGERR(("%s: open with invalid line #%d.\n", driver_name, line));
0601         return -ENODEV;
0602     }
0603 
0604     info = slgt_device_list;
0605     while(info && info->line != line)
0606         info = info->next_device;
0607     if (sanity_check(info, tty->name, "open"))
0608         return -ENODEV;
0609     if (info->init_error) {
0610         DBGERR(("%s init error=%d\n", info->device_name, info->init_error));
0611         return -ENODEV;
0612     }
0613 
0614     tty->driver_data = info;
0615     info->port.tty = tty;
0616 
0617     DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
0618 
0619     mutex_lock(&info->port.mutex);
0620 
0621     spin_lock_irqsave(&info->netlock, flags);
0622     if (info->netcount) {
0623         retval = -EBUSY;
0624         spin_unlock_irqrestore(&info->netlock, flags);
0625         mutex_unlock(&info->port.mutex);
0626         goto cleanup;
0627     }
0628     info->port.count++;
0629     spin_unlock_irqrestore(&info->netlock, flags);
0630 
0631     if (info->port.count == 1) {
0632         /* 1st open on this device, init hardware */
0633         retval = startup(info);
0634         if (retval < 0) {
0635             mutex_unlock(&info->port.mutex);
0636             goto cleanup;
0637         }
0638     }
0639     mutex_unlock(&info->port.mutex);
0640     retval = block_til_ready(tty, filp, info);
0641     if (retval) {
0642         DBGINFO(("%s block_til_ready rc=%d\n", info->device_name, retval));
0643         goto cleanup;
0644     }
0645 
0646     retval = 0;
0647 
0648 cleanup:
0649     if (retval) {
0650         if (tty->count == 1)
0651             info->port.tty = NULL; /* tty layer will release tty struct */
0652         if(info->port.count)
0653             info->port.count--;
0654     }
0655 
0656     DBGINFO(("%s open rc=%d\n", info->device_name, retval));
0657     return retval;
0658 }
0659 
0660 static void close(struct tty_struct *tty, struct file *filp)
0661 {
0662     struct slgt_info *info = tty->driver_data;
0663 
0664     if (sanity_check(info, tty->name, "close"))
0665         return;
0666     DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
0667 
0668     if (tty_port_close_start(&info->port, tty, filp) == 0)
0669         goto cleanup;
0670 
0671     mutex_lock(&info->port.mutex);
0672     if (tty_port_initialized(&info->port))
0673         wait_until_sent(tty, info->timeout);
0674     flush_buffer(tty);
0675     tty_ldisc_flush(tty);
0676 
0677     shutdown(info);
0678     mutex_unlock(&info->port.mutex);
0679 
0680     tty_port_close_end(&info->port, tty);
0681     info->port.tty = NULL;
0682 cleanup:
0683     DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
0684 }
0685 
0686 static void hangup(struct tty_struct *tty)
0687 {
0688     struct slgt_info *info = tty->driver_data;
0689     unsigned long flags;
0690 
0691     if (sanity_check(info, tty->name, "hangup"))
0692         return;
0693     DBGINFO(("%s hangup\n", info->device_name));
0694 
0695     flush_buffer(tty);
0696 
0697     mutex_lock(&info->port.mutex);
0698     shutdown(info);
0699 
0700     spin_lock_irqsave(&info->port.lock, flags);
0701     info->port.count = 0;
0702     info->port.tty = NULL;
0703     spin_unlock_irqrestore(&info->port.lock, flags);
0704     tty_port_set_active(&info->port, 0);
0705     mutex_unlock(&info->port.mutex);
0706 
0707     wake_up_interruptible(&info->port.open_wait);
0708 }
0709 
0710 static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
0711 {
0712     struct slgt_info *info = tty->driver_data;
0713     unsigned long flags;
0714 
0715     DBGINFO(("%s set_termios\n", tty->driver->name));
0716 
0717     change_params(info);
0718 
0719     /* Handle transition to B0 status */
0720     if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
0721         info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
0722         spin_lock_irqsave(&info->lock,flags);
0723         set_gtsignals(info);
0724         spin_unlock_irqrestore(&info->lock,flags);
0725     }
0726 
0727     /* Handle transition away from B0 status */
0728     if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
0729         info->signals |= SerialSignal_DTR;
0730         if (!C_CRTSCTS(tty) || !tty_throttled(tty))
0731             info->signals |= SerialSignal_RTS;
0732         spin_lock_irqsave(&info->lock,flags);
0733         set_gtsignals(info);
0734         spin_unlock_irqrestore(&info->lock,flags);
0735     }
0736 
0737     /* Handle turning off CRTSCTS */
0738     if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
0739         tty->hw_stopped = 0;
0740         tx_release(tty);
0741     }
0742 }
0743 
0744 static void update_tx_timer(struct slgt_info *info)
0745 {
0746     /*
0747      * use worst case speed of 1200bps to calculate transmit timeout
0748      * based on data in buffers (tbuf_bytes) and FIFO (128 bytes)
0749      */
0750     if (info->params.mode == MGSL_MODE_HDLC) {
0751         int timeout  = (tbuf_bytes(info) * 7) + 1000;
0752         mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(timeout));
0753     }
0754 }
0755 
0756 static int write(struct tty_struct *tty,
0757          const unsigned char *buf, int count)
0758 {
0759     int ret = 0;
0760     struct slgt_info *info = tty->driver_data;
0761     unsigned long flags;
0762 
0763     if (sanity_check(info, tty->name, "write"))
0764         return -EIO;
0765 
0766     DBGINFO(("%s write count=%d\n", info->device_name, count));
0767 
0768     if (!info->tx_buf || (count > info->max_frame_size))
0769         return -EIO;
0770 
0771     if (!count || tty->flow.stopped || tty->hw_stopped)
0772         return 0;
0773 
0774     spin_lock_irqsave(&info->lock, flags);
0775 
0776     if (info->tx_count) {
0777         /* send accumulated data from send_char() */
0778         if (!tx_load(info, info->tx_buf, info->tx_count))
0779             goto cleanup;
0780         info->tx_count = 0;
0781     }
0782 
0783     if (tx_load(info, buf, count))
0784         ret = count;
0785 
0786 cleanup:
0787     spin_unlock_irqrestore(&info->lock, flags);
0788     DBGINFO(("%s write rc=%d\n", info->device_name, ret));
0789     return ret;
0790 }
0791 
0792 static int put_char(struct tty_struct *tty, unsigned char ch)
0793 {
0794     struct slgt_info *info = tty->driver_data;
0795     unsigned long flags;
0796     int ret = 0;
0797 
0798     if (sanity_check(info, tty->name, "put_char"))
0799         return 0;
0800     DBGINFO(("%s put_char(%d)\n", info->device_name, ch));
0801     if (!info->tx_buf)
0802         return 0;
0803     spin_lock_irqsave(&info->lock,flags);
0804     if (info->tx_count < info->max_frame_size) {
0805         info->tx_buf[info->tx_count++] = ch;
0806         ret = 1;
0807     }
0808     spin_unlock_irqrestore(&info->lock,flags);
0809     return ret;
0810 }
0811 
0812 static void send_xchar(struct tty_struct *tty, char ch)
0813 {
0814     struct slgt_info *info = tty->driver_data;
0815     unsigned long flags;
0816 
0817     if (sanity_check(info, tty->name, "send_xchar"))
0818         return;
0819     DBGINFO(("%s send_xchar(%d)\n", info->device_name, ch));
0820     info->x_char = ch;
0821     if (ch) {
0822         spin_lock_irqsave(&info->lock,flags);
0823         if (!info->tx_enabled)
0824             tx_start(info);
0825         spin_unlock_irqrestore(&info->lock,flags);
0826     }
0827 }
0828 
0829 static void wait_until_sent(struct tty_struct *tty, int timeout)
0830 {
0831     struct slgt_info *info = tty->driver_data;
0832     unsigned long orig_jiffies, char_time;
0833 
0834     if (!info )
0835         return;
0836     if (sanity_check(info, tty->name, "wait_until_sent"))
0837         return;
0838     DBGINFO(("%s wait_until_sent entry\n", info->device_name));
0839     if (!tty_port_initialized(&info->port))
0840         goto exit;
0841 
0842     orig_jiffies = jiffies;
0843 
0844     /* Set check interval to 1/5 of estimated time to
0845      * send a character, and make it at least 1. The check
0846      * interval should also be less than the timeout.
0847      * Note: use tight timings here to satisfy the NIST-PCTS.
0848      */
0849 
0850     if (info->params.data_rate) {
0851             char_time = info->timeout/(32 * 5);
0852         if (!char_time)
0853             char_time++;
0854     } else
0855         char_time = 1;
0856 
0857     if (timeout)
0858         char_time = min_t(unsigned long, char_time, timeout);
0859 
0860     while (info->tx_active) {
0861         msleep_interruptible(jiffies_to_msecs(char_time));
0862         if (signal_pending(current))
0863             break;
0864         if (timeout && time_after(jiffies, orig_jiffies + timeout))
0865             break;
0866     }
0867 exit:
0868     DBGINFO(("%s wait_until_sent exit\n", info->device_name));
0869 }
0870 
0871 static unsigned int write_room(struct tty_struct *tty)
0872 {
0873     struct slgt_info *info = tty->driver_data;
0874     unsigned int ret;
0875 
0876     if (sanity_check(info, tty->name, "write_room"))
0877         return 0;
0878     ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE;
0879     DBGINFO(("%s write_room=%u\n", info->device_name, ret));
0880     return ret;
0881 }
0882 
0883 static void flush_chars(struct tty_struct *tty)
0884 {
0885     struct slgt_info *info = tty->driver_data;
0886     unsigned long flags;
0887 
0888     if (sanity_check(info, tty->name, "flush_chars"))
0889         return;
0890     DBGINFO(("%s flush_chars entry tx_count=%d\n", info->device_name, info->tx_count));
0891 
0892     if (info->tx_count <= 0 || tty->flow.stopped ||
0893         tty->hw_stopped || !info->tx_buf)
0894         return;
0895 
0896     DBGINFO(("%s flush_chars start transmit\n", info->device_name));
0897 
0898     spin_lock_irqsave(&info->lock,flags);
0899     if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
0900         info->tx_count = 0;
0901     spin_unlock_irqrestore(&info->lock,flags);
0902 }
0903 
0904 static void flush_buffer(struct tty_struct *tty)
0905 {
0906     struct slgt_info *info = tty->driver_data;
0907     unsigned long flags;
0908 
0909     if (sanity_check(info, tty->name, "flush_buffer"))
0910         return;
0911     DBGINFO(("%s flush_buffer\n", info->device_name));
0912 
0913     spin_lock_irqsave(&info->lock, flags);
0914     info->tx_count = 0;
0915     spin_unlock_irqrestore(&info->lock, flags);
0916 
0917     tty_wakeup(tty);
0918 }
0919 
0920 /*
0921  * throttle (stop) transmitter
0922  */
0923 static void tx_hold(struct tty_struct *tty)
0924 {
0925     struct slgt_info *info = tty->driver_data;
0926     unsigned long flags;
0927 
0928     if (sanity_check(info, tty->name, "tx_hold"))
0929         return;
0930     DBGINFO(("%s tx_hold\n", info->device_name));
0931     spin_lock_irqsave(&info->lock,flags);
0932     if (info->tx_enabled && info->params.mode == MGSL_MODE_ASYNC)
0933         tx_stop(info);
0934     spin_unlock_irqrestore(&info->lock,flags);
0935 }
0936 
0937 /*
0938  * release (start) transmitter
0939  */
0940 static void tx_release(struct tty_struct *tty)
0941 {
0942     struct slgt_info *info = tty->driver_data;
0943     unsigned long flags;
0944 
0945     if (sanity_check(info, tty->name, "tx_release"))
0946         return;
0947     DBGINFO(("%s tx_release\n", info->device_name));
0948     spin_lock_irqsave(&info->lock, flags);
0949     if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
0950         info->tx_count = 0;
0951     spin_unlock_irqrestore(&info->lock, flags);
0952 }
0953 
0954 /*
0955  * Service an IOCTL request
0956  *
0957  * Arguments
0958  *
0959  *  tty pointer to tty instance data
0960  *  cmd IOCTL command code
0961  *  arg command argument/context
0962  *
0963  * Return 0 if success, otherwise error code
0964  */
0965 static int ioctl(struct tty_struct *tty,
0966          unsigned int cmd, unsigned long arg)
0967 {
0968     struct slgt_info *info = tty->driver_data;
0969     void __user *argp = (void __user *)arg;
0970     int ret;
0971 
0972     if (sanity_check(info, tty->name, "ioctl"))
0973         return -ENODEV;
0974     DBGINFO(("%s ioctl() cmd=%08X\n", info->device_name, cmd));
0975 
0976     if (cmd != TIOCMIWAIT) {
0977         if (tty_io_error(tty))
0978             return -EIO;
0979     }
0980 
0981     switch (cmd) {
0982     case MGSL_IOCWAITEVENT:
0983         return wait_mgsl_event(info, argp);
0984     case TIOCMIWAIT:
0985         return modem_input_wait(info,(int)arg);
0986     case MGSL_IOCSGPIO:
0987         return set_gpio(info, argp);
0988     case MGSL_IOCGGPIO:
0989         return get_gpio(info, argp);
0990     case MGSL_IOCWAITGPIO:
0991         return wait_gpio(info, argp);
0992     case MGSL_IOCGXSYNC:
0993         return get_xsync(info, argp);
0994     case MGSL_IOCSXSYNC:
0995         return set_xsync(info, (int)arg);
0996     case MGSL_IOCGXCTRL:
0997         return get_xctrl(info, argp);
0998     case MGSL_IOCSXCTRL:
0999         return set_xctrl(info, (int)arg);
1000     }
1001     mutex_lock(&info->port.mutex);
1002     switch (cmd) {
1003     case MGSL_IOCGPARAMS:
1004         ret = get_params(info, argp);
1005         break;
1006     case MGSL_IOCSPARAMS:
1007         ret = set_params(info, argp);
1008         break;
1009     case MGSL_IOCGTXIDLE:
1010         ret = get_txidle(info, argp);
1011         break;
1012     case MGSL_IOCSTXIDLE:
1013         ret = set_txidle(info, (int)arg);
1014         break;
1015     case MGSL_IOCTXENABLE:
1016         ret = tx_enable(info, (int)arg);
1017         break;
1018     case MGSL_IOCRXENABLE:
1019         ret = rx_enable(info, (int)arg);
1020         break;
1021     case MGSL_IOCTXABORT:
1022         ret = tx_abort(info);
1023         break;
1024     case MGSL_IOCGSTATS:
1025         ret = get_stats(info, argp);
1026         break;
1027     case MGSL_IOCGIF:
1028         ret = get_interface(info, argp);
1029         break;
1030     case MGSL_IOCSIF:
1031         ret = set_interface(info,(int)arg);
1032         break;
1033     default:
1034         ret = -ENOIOCTLCMD;
1035     }
1036     mutex_unlock(&info->port.mutex);
1037     return ret;
1038 }
1039 
1040 static int get_icount(struct tty_struct *tty,
1041                 struct serial_icounter_struct *icount)
1042 
1043 {
1044     struct slgt_info *info = tty->driver_data;
1045     struct mgsl_icount cnow;    /* kernel counter temps */
1046     unsigned long flags;
1047 
1048     spin_lock_irqsave(&info->lock,flags);
1049     cnow = info->icount;
1050     spin_unlock_irqrestore(&info->lock,flags);
1051 
1052     icount->cts = cnow.cts;
1053     icount->dsr = cnow.dsr;
1054     icount->rng = cnow.rng;
1055     icount->dcd = cnow.dcd;
1056     icount->rx = cnow.rx;
1057     icount->tx = cnow.tx;
1058     icount->frame = cnow.frame;
1059     icount->overrun = cnow.overrun;
1060     icount->parity = cnow.parity;
1061     icount->brk = cnow.brk;
1062     icount->buf_overrun = cnow.buf_overrun;
1063 
1064     return 0;
1065 }
1066 
1067 /*
1068  * support for 32 bit ioctl calls on 64 bit systems
1069  */
1070 #ifdef CONFIG_COMPAT
1071 static long get_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *user_params)
1072 {
1073     struct MGSL_PARAMS32 tmp_params;
1074 
1075     DBGINFO(("%s get_params32\n", info->device_name));
1076     memset(&tmp_params, 0, sizeof(tmp_params));
1077     tmp_params.mode            = (compat_ulong_t)info->params.mode;
1078     tmp_params.loopback        = info->params.loopback;
1079     tmp_params.flags           = info->params.flags;
1080     tmp_params.encoding        = info->params.encoding;
1081     tmp_params.clock_speed     = (compat_ulong_t)info->params.clock_speed;
1082     tmp_params.addr_filter     = info->params.addr_filter;
1083     tmp_params.crc_type        = info->params.crc_type;
1084     tmp_params.preamble_length = info->params.preamble_length;
1085     tmp_params.preamble        = info->params.preamble;
1086     tmp_params.data_rate       = (compat_ulong_t)info->params.data_rate;
1087     tmp_params.data_bits       = info->params.data_bits;
1088     tmp_params.stop_bits       = info->params.stop_bits;
1089     tmp_params.parity          = info->params.parity;
1090     if (copy_to_user(user_params, &tmp_params, sizeof(struct MGSL_PARAMS32)))
1091         return -EFAULT;
1092     return 0;
1093 }
1094 
1095 static long set_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *new_params)
1096 {
1097     struct MGSL_PARAMS32 tmp_params;
1098 
1099     DBGINFO(("%s set_params32\n", info->device_name));
1100     if (copy_from_user(&tmp_params, new_params, sizeof(struct MGSL_PARAMS32)))
1101         return -EFAULT;
1102 
1103     spin_lock(&info->lock);
1104     if (tmp_params.mode == MGSL_MODE_BASE_CLOCK) {
1105         info->base_clock = tmp_params.clock_speed;
1106     } else {
1107         info->params.mode            = tmp_params.mode;
1108         info->params.loopback        = tmp_params.loopback;
1109         info->params.flags           = tmp_params.flags;
1110         info->params.encoding        = tmp_params.encoding;
1111         info->params.clock_speed     = tmp_params.clock_speed;
1112         info->params.addr_filter     = tmp_params.addr_filter;
1113         info->params.crc_type        = tmp_params.crc_type;
1114         info->params.preamble_length = tmp_params.preamble_length;
1115         info->params.preamble        = tmp_params.preamble;
1116         info->params.data_rate       = tmp_params.data_rate;
1117         info->params.data_bits       = tmp_params.data_bits;
1118         info->params.stop_bits       = tmp_params.stop_bits;
1119         info->params.parity          = tmp_params.parity;
1120     }
1121     spin_unlock(&info->lock);
1122 
1123     program_hw(info);
1124 
1125     return 0;
1126 }
1127 
1128 static long slgt_compat_ioctl(struct tty_struct *tty,
1129              unsigned int cmd, unsigned long arg)
1130 {
1131     struct slgt_info *info = tty->driver_data;
1132     int rc;
1133 
1134     if (sanity_check(info, tty->name, "compat_ioctl"))
1135         return -ENODEV;
1136     DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd));
1137 
1138     switch (cmd) {
1139     case MGSL_IOCSPARAMS32:
1140         rc = set_params32(info, compat_ptr(arg));
1141         break;
1142 
1143     case MGSL_IOCGPARAMS32:
1144         rc = get_params32(info, compat_ptr(arg));
1145         break;
1146 
1147     case MGSL_IOCGPARAMS:
1148     case MGSL_IOCSPARAMS:
1149     case MGSL_IOCGTXIDLE:
1150     case MGSL_IOCGSTATS:
1151     case MGSL_IOCWAITEVENT:
1152     case MGSL_IOCGIF:
1153     case MGSL_IOCSGPIO:
1154     case MGSL_IOCGGPIO:
1155     case MGSL_IOCWAITGPIO:
1156     case MGSL_IOCGXSYNC:
1157     case MGSL_IOCGXCTRL:
1158         rc = ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
1159         break;
1160     default:
1161         rc = ioctl(tty, cmd, arg);
1162     }
1163     DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc));
1164     return rc;
1165 }
1166 #else
1167 #define slgt_compat_ioctl NULL
1168 #endif /* ifdef CONFIG_COMPAT */
1169 
1170 /*
1171  * proc fs support
1172  */
1173 static inline void line_info(struct seq_file *m, struct slgt_info *info)
1174 {
1175     char stat_buf[30];
1176     unsigned long flags;
1177 
1178     seq_printf(m, "%s: IO=%08X IRQ=%d MaxFrameSize=%u\n",
1179               info->device_name, info->phys_reg_addr,
1180               info->irq_level, info->max_frame_size);
1181 
1182     /* output current serial signal states */
1183     spin_lock_irqsave(&info->lock,flags);
1184     get_gtsignals(info);
1185     spin_unlock_irqrestore(&info->lock,flags);
1186 
1187     stat_buf[0] = 0;
1188     stat_buf[1] = 0;
1189     if (info->signals & SerialSignal_RTS)
1190         strcat(stat_buf, "|RTS");
1191     if (info->signals & SerialSignal_CTS)
1192         strcat(stat_buf, "|CTS");
1193     if (info->signals & SerialSignal_DTR)
1194         strcat(stat_buf, "|DTR");
1195     if (info->signals & SerialSignal_DSR)
1196         strcat(stat_buf, "|DSR");
1197     if (info->signals & SerialSignal_DCD)
1198         strcat(stat_buf, "|CD");
1199     if (info->signals & SerialSignal_RI)
1200         strcat(stat_buf, "|RI");
1201 
1202     if (info->params.mode != MGSL_MODE_ASYNC) {
1203         seq_printf(m, "\tHDLC txok:%d rxok:%d",
1204                    info->icount.txok, info->icount.rxok);
1205         if (info->icount.txunder)
1206             seq_printf(m, " txunder:%d", info->icount.txunder);
1207         if (info->icount.txabort)
1208             seq_printf(m, " txabort:%d", info->icount.txabort);
1209         if (info->icount.rxshort)
1210             seq_printf(m, " rxshort:%d", info->icount.rxshort);
1211         if (info->icount.rxlong)
1212             seq_printf(m, " rxlong:%d", info->icount.rxlong);
1213         if (info->icount.rxover)
1214             seq_printf(m, " rxover:%d", info->icount.rxover);
1215         if (info->icount.rxcrc)
1216             seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
1217     } else {
1218         seq_printf(m, "\tASYNC tx:%d rx:%d",
1219                    info->icount.tx, info->icount.rx);
1220         if (info->icount.frame)
1221             seq_printf(m, " fe:%d", info->icount.frame);
1222         if (info->icount.parity)
1223             seq_printf(m, " pe:%d", info->icount.parity);
1224         if (info->icount.brk)
1225             seq_printf(m, " brk:%d", info->icount.brk);
1226         if (info->icount.overrun)
1227             seq_printf(m, " oe:%d", info->icount.overrun);
1228     }
1229 
1230     /* Append serial signal status to end */
1231     seq_printf(m, " %s\n", stat_buf+1);
1232 
1233     seq_printf(m, "\ttxactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
1234                info->tx_active,info->bh_requested,info->bh_running,
1235                info->pending_bh);
1236 }
1237 
1238 /* Called to print information about devices
1239  */
1240 static int synclink_gt_proc_show(struct seq_file *m, void *v)
1241 {
1242     struct slgt_info *info;
1243 
1244     seq_puts(m, "synclink_gt driver\n");
1245 
1246     info = slgt_device_list;
1247     while( info ) {
1248         line_info(m, info);
1249         info = info->next_device;
1250     }
1251     return 0;
1252 }
1253 
1254 /*
1255  * return count of bytes in transmit buffer
1256  */
1257 static unsigned int chars_in_buffer(struct tty_struct *tty)
1258 {
1259     struct slgt_info *info = tty->driver_data;
1260     unsigned int count;
1261     if (sanity_check(info, tty->name, "chars_in_buffer"))
1262         return 0;
1263     count = tbuf_bytes(info);
1264     DBGINFO(("%s chars_in_buffer()=%u\n", info->device_name, count));
1265     return count;
1266 }
1267 
1268 /*
1269  * signal remote device to throttle send data (our receive data)
1270  */
1271 static void throttle(struct tty_struct * tty)
1272 {
1273     struct slgt_info *info = tty->driver_data;
1274     unsigned long flags;
1275 
1276     if (sanity_check(info, tty->name, "throttle"))
1277         return;
1278     DBGINFO(("%s throttle\n", info->device_name));
1279     if (I_IXOFF(tty))
1280         send_xchar(tty, STOP_CHAR(tty));
1281     if (C_CRTSCTS(tty)) {
1282         spin_lock_irqsave(&info->lock,flags);
1283         info->signals &= ~SerialSignal_RTS;
1284         set_gtsignals(info);
1285         spin_unlock_irqrestore(&info->lock,flags);
1286     }
1287 }
1288 
1289 /*
1290  * signal remote device to stop throttling send data (our receive data)
1291  */
1292 static void unthrottle(struct tty_struct * tty)
1293 {
1294     struct slgt_info *info = tty->driver_data;
1295     unsigned long flags;
1296 
1297     if (sanity_check(info, tty->name, "unthrottle"))
1298         return;
1299     DBGINFO(("%s unthrottle\n", info->device_name));
1300     if (I_IXOFF(tty)) {
1301         if (info->x_char)
1302             info->x_char = 0;
1303         else
1304             send_xchar(tty, START_CHAR(tty));
1305     }
1306     if (C_CRTSCTS(tty)) {
1307         spin_lock_irqsave(&info->lock,flags);
1308         info->signals |= SerialSignal_RTS;
1309         set_gtsignals(info);
1310         spin_unlock_irqrestore(&info->lock,flags);
1311     }
1312 }
1313 
1314 /*
1315  * set or clear transmit break condition
1316  * break_state  -1=set break condition, 0=clear
1317  */
1318 static int set_break(struct tty_struct *tty, int break_state)
1319 {
1320     struct slgt_info *info = tty->driver_data;
1321     unsigned short value;
1322     unsigned long flags;
1323 
1324     if (sanity_check(info, tty->name, "set_break"))
1325         return -EINVAL;
1326     DBGINFO(("%s set_break(%d)\n", info->device_name, break_state));
1327 
1328     spin_lock_irqsave(&info->lock,flags);
1329     value = rd_reg16(info, TCR);
1330     if (break_state == -1)
1331         value |= BIT6;
1332     else
1333         value &= ~BIT6;
1334     wr_reg16(info, TCR, value);
1335     spin_unlock_irqrestore(&info->lock,flags);
1336     return 0;
1337 }
1338 
1339 #if SYNCLINK_GENERIC_HDLC
1340 
1341 /**
1342  * hdlcdev_attach - called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
1343  * @dev:      pointer to network device structure
1344  * @encoding: serial encoding setting
1345  * @parity:   FCS setting
1346  *
1347  * Set encoding and frame check sequence (FCS) options.
1348  *
1349  * Return: 0 if success, otherwise error code
1350  */
1351 static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
1352               unsigned short parity)
1353 {
1354     struct slgt_info *info = dev_to_port(dev);
1355     unsigned char  new_encoding;
1356     unsigned short new_crctype;
1357 
1358     /* return error if TTY interface open */
1359     if (info->port.count)
1360         return -EBUSY;
1361 
1362     DBGINFO(("%s hdlcdev_attach\n", info->device_name));
1363 
1364     switch (encoding)
1365     {
1366     case ENCODING_NRZ:        new_encoding = HDLC_ENCODING_NRZ; break;
1367     case ENCODING_NRZI:       new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
1368     case ENCODING_FM_MARK:    new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
1369     case ENCODING_FM_SPACE:   new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
1370     case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
1371     default: return -EINVAL;
1372     }
1373 
1374     switch (parity)
1375     {
1376     case PARITY_NONE:            new_crctype = HDLC_CRC_NONE; break;
1377     case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
1378     case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
1379     default: return -EINVAL;
1380     }
1381 
1382     info->params.encoding = new_encoding;
1383     info->params.crc_type = new_crctype;
1384 
1385     /* if network interface up, reprogram hardware */
1386     if (info->netcount)
1387         program_hw(info);
1388 
1389     return 0;
1390 }
1391 
1392 /**
1393  * hdlcdev_xmit - called by generic HDLC layer to send a frame
1394  * @skb: socket buffer containing HDLC frame
1395  * @dev: pointer to network device structure
1396  */
1397 static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
1398                       struct net_device *dev)
1399 {
1400     struct slgt_info *info = dev_to_port(dev);
1401     unsigned long flags;
1402 
1403     DBGINFO(("%s hdlc_xmit\n", dev->name));
1404 
1405     if (!skb->len)
1406         return NETDEV_TX_OK;
1407 
1408     /* stop sending until this frame completes */
1409     netif_stop_queue(dev);
1410 
1411     /* update network statistics */
1412     dev->stats.tx_packets++;
1413     dev->stats.tx_bytes += skb->len;
1414 
1415     /* save start time for transmit timeout detection */
1416     netif_trans_update(dev);
1417 
1418     spin_lock_irqsave(&info->lock, flags);
1419     tx_load(info, skb->data, skb->len);
1420     spin_unlock_irqrestore(&info->lock, flags);
1421 
1422     /* done with socket buffer, so free it */
1423     dev_kfree_skb(skb);
1424 
1425     return NETDEV_TX_OK;
1426 }
1427 
1428 /**
1429  * hdlcdev_open - called by network layer when interface enabled
1430  * @dev: pointer to network device structure
1431  *
1432  * Claim resources and initialize hardware.
1433  *
1434  * Return: 0 if success, otherwise error code
1435  */
1436 static int hdlcdev_open(struct net_device *dev)
1437 {
1438     struct slgt_info *info = dev_to_port(dev);
1439     int rc;
1440     unsigned long flags;
1441 
1442     if (!try_module_get(THIS_MODULE))
1443         return -EBUSY;
1444 
1445     DBGINFO(("%s hdlcdev_open\n", dev->name));
1446 
1447     /* generic HDLC layer open processing */
1448     rc = hdlc_open(dev);
1449     if (rc)
1450         return rc;
1451 
1452     /* arbitrate between network and tty opens */
1453     spin_lock_irqsave(&info->netlock, flags);
1454     if (info->port.count != 0 || info->netcount != 0) {
1455         DBGINFO(("%s hdlc_open busy\n", dev->name));
1456         spin_unlock_irqrestore(&info->netlock, flags);
1457         return -EBUSY;
1458     }
1459     info->netcount=1;
1460     spin_unlock_irqrestore(&info->netlock, flags);
1461 
1462     /* claim resources and init adapter */
1463     if ((rc = startup(info)) != 0) {
1464         spin_lock_irqsave(&info->netlock, flags);
1465         info->netcount=0;
1466         spin_unlock_irqrestore(&info->netlock, flags);
1467         return rc;
1468     }
1469 
1470     /* assert RTS and DTR, apply hardware settings */
1471     info->signals |= SerialSignal_RTS | SerialSignal_DTR;
1472     program_hw(info);
1473 
1474     /* enable network layer transmit */
1475     netif_trans_update(dev);
1476     netif_start_queue(dev);
1477 
1478     /* inform generic HDLC layer of current DCD status */
1479     spin_lock_irqsave(&info->lock, flags);
1480     get_gtsignals(info);
1481     spin_unlock_irqrestore(&info->lock, flags);
1482     if (info->signals & SerialSignal_DCD)
1483         netif_carrier_on(dev);
1484     else
1485         netif_carrier_off(dev);
1486     return 0;
1487 }
1488 
1489 /**
1490  * hdlcdev_close - called by network layer when interface is disabled
1491  * @dev:  pointer to network device structure
1492  *
1493  * Shutdown hardware and release resources.
1494  *
1495  * Return: 0 if success, otherwise error code
1496  */
1497 static int hdlcdev_close(struct net_device *dev)
1498 {
1499     struct slgt_info *info = dev_to_port(dev);
1500     unsigned long flags;
1501 
1502     DBGINFO(("%s hdlcdev_close\n", dev->name));
1503 
1504     netif_stop_queue(dev);
1505 
1506     /* shutdown adapter and release resources */
1507     shutdown(info);
1508 
1509     hdlc_close(dev);
1510 
1511     spin_lock_irqsave(&info->netlock, flags);
1512     info->netcount=0;
1513     spin_unlock_irqrestore(&info->netlock, flags);
1514 
1515     module_put(THIS_MODULE);
1516     return 0;
1517 }
1518 
1519 /**
1520  * hdlcdev_ioctl - called by network layer to process IOCTL call to network device
1521  * @dev: pointer to network device structure
1522  * @ifr: pointer to network interface request structure
1523  * @cmd: IOCTL command code
1524  *
1525  * Return: 0 if success, otherwise error code
1526  */
1527 static int hdlcdev_ioctl(struct net_device *dev, struct if_settings *ifs)
1528 {
1529     const size_t size = sizeof(sync_serial_settings);
1530     sync_serial_settings new_line;
1531     sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
1532     struct slgt_info *info = dev_to_port(dev);
1533     unsigned int flags;
1534 
1535     DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
1536 
1537     /* return error if TTY interface open */
1538     if (info->port.count)
1539         return -EBUSY;
1540 
1541     memset(&new_line, 0, sizeof(new_line));
1542 
1543     switch (ifs->type) {
1544     case IF_GET_IFACE: /* return current sync_serial_settings */
1545 
1546         ifs->type = IF_IFACE_SYNC_SERIAL;
1547         if (ifs->size < size) {
1548             ifs->size = size; /* data size wanted */
1549             return -ENOBUFS;
1550         }
1551 
1552         flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
1553                           HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
1554                           HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
1555                           HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
1556 
1557         switch (flags){
1558         case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
1559         case (HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_INT; break;
1560         case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
1561         case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
1562         default: new_line.clock_type = CLOCK_DEFAULT;
1563         }
1564 
1565         new_line.clock_rate = info->params.clock_speed;
1566         new_line.loopback   = info->params.loopback ? 1:0;
1567 
1568         if (copy_to_user(line, &new_line, size))
1569             return -EFAULT;
1570         return 0;
1571 
1572     case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
1573 
1574         if(!capable(CAP_NET_ADMIN))
1575             return -EPERM;
1576         if (copy_from_user(&new_line, line, size))
1577             return -EFAULT;
1578 
1579         switch (new_line.clock_type)
1580         {
1581         case CLOCK_EXT:      flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
1582         case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
1583         case CLOCK_INT:      flags = HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG;    break;
1584         case CLOCK_TXINT:    flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG;    break;
1585         case CLOCK_DEFAULT:  flags = info->params.flags &
1586                          (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
1587                           HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
1588                           HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
1589                           HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN); break;
1590         default: return -EINVAL;
1591         }
1592 
1593         if (new_line.loopback != 0 && new_line.loopback != 1)
1594             return -EINVAL;
1595 
1596         info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
1597                     HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
1598                     HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
1599                     HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
1600         info->params.flags |= flags;
1601 
1602         info->params.loopback = new_line.loopback;
1603 
1604         if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
1605             info->params.clock_speed = new_line.clock_rate;
1606         else
1607             info->params.clock_speed = 0;
1608 
1609         /* if network interface up, reprogram hardware */
1610         if (info->netcount)
1611             program_hw(info);
1612         return 0;
1613 
1614     default:
1615         return hdlc_ioctl(dev, ifs);
1616     }
1617 }
1618 
1619 /**
1620  * hdlcdev_tx_timeout - called by network layer when transmit timeout is detected
1621  * @dev: pointer to network device structure
1622  * @txqueue: unused
1623  */
1624 static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
1625 {
1626     struct slgt_info *info = dev_to_port(dev);
1627     unsigned long flags;
1628 
1629     DBGINFO(("%s hdlcdev_tx_timeout\n", dev->name));
1630 
1631     dev->stats.tx_errors++;
1632     dev->stats.tx_aborted_errors++;
1633 
1634     spin_lock_irqsave(&info->lock,flags);
1635     tx_stop(info);
1636     spin_unlock_irqrestore(&info->lock,flags);
1637 
1638     netif_wake_queue(dev);
1639 }
1640 
1641 /**
1642  * hdlcdev_tx_done - called by device driver when transmit completes
1643  * @info: pointer to device instance information
1644  *
1645  * Reenable network layer transmit if stopped.
1646  */
1647 static void hdlcdev_tx_done(struct slgt_info *info)
1648 {
1649     if (netif_queue_stopped(info->netdev))
1650         netif_wake_queue(info->netdev);
1651 }
1652 
1653 /**
1654  * hdlcdev_rx - called by device driver when frame received
1655  * @info: pointer to device instance information
1656  * @buf:  pointer to buffer contianing frame data
1657  * @size: count of data bytes in buf
1658  *
1659  * Pass frame to network layer.
1660  */
1661 static void hdlcdev_rx(struct slgt_info *info, char *buf, int size)
1662 {
1663     struct sk_buff *skb = dev_alloc_skb(size);
1664     struct net_device *dev = info->netdev;
1665 
1666     DBGINFO(("%s hdlcdev_rx\n", dev->name));
1667 
1668     if (skb == NULL) {
1669         DBGERR(("%s: can't alloc skb, drop packet\n", dev->name));
1670         dev->stats.rx_dropped++;
1671         return;
1672     }
1673 
1674     skb_put_data(skb, buf, size);
1675 
1676     skb->protocol = hdlc_type_trans(skb, dev);
1677 
1678     dev->stats.rx_packets++;
1679     dev->stats.rx_bytes += size;
1680 
1681     netif_rx(skb);
1682 }
1683 
1684 static const struct net_device_ops hdlcdev_ops = {
1685     .ndo_open       = hdlcdev_open,
1686     .ndo_stop       = hdlcdev_close,
1687     .ndo_start_xmit = hdlc_start_xmit,
1688     .ndo_siocwandev = hdlcdev_ioctl,
1689     .ndo_tx_timeout = hdlcdev_tx_timeout,
1690 };
1691 
1692 /**
1693  * hdlcdev_init - called by device driver when adding device instance
1694  * @info: pointer to device instance information
1695  *
1696  * Do generic HDLC initialization.
1697  *
1698  * Return: 0 if success, otherwise error code
1699  */
1700 static int hdlcdev_init(struct slgt_info *info)
1701 {
1702     int rc;
1703     struct net_device *dev;
1704     hdlc_device *hdlc;
1705 
1706     /* allocate and initialize network and HDLC layer objects */
1707 
1708     dev = alloc_hdlcdev(info);
1709     if (!dev) {
1710         printk(KERN_ERR "%s hdlc device alloc failure\n", info->device_name);
1711         return -ENOMEM;
1712     }
1713 
1714     /* for network layer reporting purposes only */
1715     dev->mem_start = info->phys_reg_addr;
1716     dev->mem_end   = info->phys_reg_addr + SLGT_REG_SIZE - 1;
1717     dev->irq       = info->irq_level;
1718 
1719     /* network layer callbacks and settings */
1720     dev->netdev_ops     = &hdlcdev_ops;
1721     dev->watchdog_timeo = 10 * HZ;
1722     dev->tx_queue_len   = 50;
1723 
1724     /* generic HDLC layer callbacks and settings */
1725     hdlc         = dev_to_hdlc(dev);
1726     hdlc->attach = hdlcdev_attach;
1727     hdlc->xmit   = hdlcdev_xmit;
1728 
1729     /* register objects with HDLC layer */
1730     rc = register_hdlc_device(dev);
1731     if (rc) {
1732         printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
1733         free_netdev(dev);
1734         return rc;
1735     }
1736 
1737     info->netdev = dev;
1738     return 0;
1739 }
1740 
1741 /**
1742  * hdlcdev_exit - called by device driver when removing device instance
1743  * @info: pointer to device instance information
1744  *
1745  * Do generic HDLC cleanup.
1746  */
1747 static void hdlcdev_exit(struct slgt_info *info)
1748 {
1749     if (!info->netdev)
1750         return;
1751     unregister_hdlc_device(info->netdev);
1752     free_netdev(info->netdev);
1753     info->netdev = NULL;
1754 }
1755 
1756 #endif /* ifdef CONFIG_HDLC */
1757 
1758 /*
1759  * get async data from rx DMA buffers
1760  */
1761 static void rx_async(struct slgt_info *info)
1762 {
1763     struct mgsl_icount *icount = &info->icount;
1764     unsigned int start, end;
1765     unsigned char *p;
1766     unsigned char status;
1767     struct slgt_desc *bufs = info->rbufs;
1768     int i, count;
1769     int chars = 0;
1770     int stat;
1771     unsigned char ch;
1772 
1773     start = end = info->rbuf_current;
1774 
1775     while(desc_complete(bufs[end])) {
1776         count = desc_count(bufs[end]) - info->rbuf_index;
1777         p     = bufs[end].buf + info->rbuf_index;
1778 
1779         DBGISR(("%s rx_async count=%d\n", info->device_name, count));
1780         DBGDATA(info, p, count, "rx");
1781 
1782         for(i=0 ; i < count; i+=2, p+=2) {
1783             ch = *p;
1784             icount->rx++;
1785 
1786             stat = 0;
1787 
1788             status = *(p + 1) & (BIT1 + BIT0);
1789             if (status) {
1790                 if (status & BIT1)
1791                     icount->parity++;
1792                 else if (status & BIT0)
1793                     icount->frame++;
1794                 /* discard char if tty control flags say so */
1795                 if (status & info->ignore_status_mask)
1796                     continue;
1797                 if (status & BIT1)
1798                     stat = TTY_PARITY;
1799                 else if (status & BIT0)
1800                     stat = TTY_FRAME;
1801             }
1802             tty_insert_flip_char(&info->port, ch, stat);
1803             chars++;
1804         }
1805 
1806         if (i < count) {
1807             /* receive buffer not completed */
1808             info->rbuf_index += i;
1809             mod_timer(&info->rx_timer, jiffies + 1);
1810             break;
1811         }
1812 
1813         info->rbuf_index = 0;
1814         free_rbufs(info, end, end);
1815 
1816         if (++end == info->rbuf_count)
1817             end = 0;
1818 
1819         /* if entire list searched then no frame available */
1820         if (end == start)
1821             break;
1822     }
1823 
1824     if (chars)
1825         tty_flip_buffer_push(&info->port);
1826 }
1827 
1828 /*
1829  * return next bottom half action to perform
1830  */
1831 static int bh_action(struct slgt_info *info)
1832 {
1833     unsigned long flags;
1834     int rc;
1835 
1836     spin_lock_irqsave(&info->lock,flags);
1837 
1838     if (info->pending_bh & BH_RECEIVE) {
1839         info->pending_bh &= ~BH_RECEIVE;
1840         rc = BH_RECEIVE;
1841     } else if (info->pending_bh & BH_TRANSMIT) {
1842         info->pending_bh &= ~BH_TRANSMIT;
1843         rc = BH_TRANSMIT;
1844     } else if (info->pending_bh & BH_STATUS) {
1845         info->pending_bh &= ~BH_STATUS;
1846         rc = BH_STATUS;
1847     } else {
1848         /* Mark BH routine as complete */
1849         info->bh_running = false;
1850         info->bh_requested = false;
1851         rc = 0;
1852     }
1853 
1854     spin_unlock_irqrestore(&info->lock,flags);
1855 
1856     return rc;
1857 }
1858 
1859 /*
1860  * perform bottom half processing
1861  */
1862 static void bh_handler(struct work_struct *work)
1863 {
1864     struct slgt_info *info = container_of(work, struct slgt_info, task);
1865     int action;
1866 
1867     info->bh_running = true;
1868 
1869     while((action = bh_action(info))) {
1870         switch (action) {
1871         case BH_RECEIVE:
1872             DBGBH(("%s bh receive\n", info->device_name));
1873             switch(info->params.mode) {
1874             case MGSL_MODE_ASYNC:
1875                 rx_async(info);
1876                 break;
1877             case MGSL_MODE_HDLC:
1878                 while(rx_get_frame(info));
1879                 break;
1880             case MGSL_MODE_RAW:
1881             case MGSL_MODE_MONOSYNC:
1882             case MGSL_MODE_BISYNC:
1883             case MGSL_MODE_XSYNC:
1884                 while(rx_get_buf(info));
1885                 break;
1886             }
1887             /* restart receiver if rx DMA buffers exhausted */
1888             if (info->rx_restart)
1889                 rx_start(info);
1890             break;
1891         case BH_TRANSMIT:
1892             bh_transmit(info);
1893             break;
1894         case BH_STATUS:
1895             DBGBH(("%s bh status\n", info->device_name));
1896             info->ri_chkcount = 0;
1897             info->dsr_chkcount = 0;
1898             info->dcd_chkcount = 0;
1899             info->cts_chkcount = 0;
1900             break;
1901         default:
1902             DBGBH(("%s unknown action\n", info->device_name));
1903             break;
1904         }
1905     }
1906     DBGBH(("%s bh_handler exit\n", info->device_name));
1907 }
1908 
1909 static void bh_transmit(struct slgt_info *info)
1910 {
1911     struct tty_struct *tty = info->port.tty;
1912 
1913     DBGBH(("%s bh_transmit\n", info->device_name));
1914     if (tty)
1915         tty_wakeup(tty);
1916 }
1917 
1918 static void dsr_change(struct slgt_info *info, unsigned short status)
1919 {
1920     if (status & BIT3) {
1921         info->signals |= SerialSignal_DSR;
1922         info->input_signal_events.dsr_up++;
1923     } else {
1924         info->signals &= ~SerialSignal_DSR;
1925         info->input_signal_events.dsr_down++;
1926     }
1927     DBGISR(("dsr_change %s signals=%04X\n", info->device_name, info->signals));
1928     if ((info->dsr_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
1929         slgt_irq_off(info, IRQ_DSR);
1930         return;
1931     }
1932     info->icount.dsr++;
1933     wake_up_interruptible(&info->status_event_wait_q);
1934     wake_up_interruptible(&info->event_wait_q);
1935     info->pending_bh |= BH_STATUS;
1936 }
1937 
1938 static void cts_change(struct slgt_info *info, unsigned short status)
1939 {
1940     if (status & BIT2) {
1941         info->signals |= SerialSignal_CTS;
1942         info->input_signal_events.cts_up++;
1943     } else {
1944         info->signals &= ~SerialSignal_CTS;
1945         info->input_signal_events.cts_down++;
1946     }
1947     DBGISR(("cts_change %s signals=%04X\n", info->device_name, info->signals));
1948     if ((info->cts_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
1949         slgt_irq_off(info, IRQ_CTS);
1950         return;
1951     }
1952     info->icount.cts++;
1953     wake_up_interruptible(&info->status_event_wait_q);
1954     wake_up_interruptible(&info->event_wait_q);
1955     info->pending_bh |= BH_STATUS;
1956 
1957     if (tty_port_cts_enabled(&info->port)) {
1958         if (info->port.tty) {
1959             if (info->port.tty->hw_stopped) {
1960                 if (info->signals & SerialSignal_CTS) {
1961                     info->port.tty->hw_stopped = 0;
1962                     info->pending_bh |= BH_TRANSMIT;
1963                     return;
1964                 }
1965             } else {
1966                 if (!(info->signals & SerialSignal_CTS))
1967                     info->port.tty->hw_stopped = 1;
1968             }
1969         }
1970     }
1971 }
1972 
1973 static void dcd_change(struct slgt_info *info, unsigned short status)
1974 {
1975     if (status & BIT1) {
1976         info->signals |= SerialSignal_DCD;
1977         info->input_signal_events.dcd_up++;
1978     } else {
1979         info->signals &= ~SerialSignal_DCD;
1980         info->input_signal_events.dcd_down++;
1981     }
1982     DBGISR(("dcd_change %s signals=%04X\n", info->device_name, info->signals));
1983     if ((info->dcd_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
1984         slgt_irq_off(info, IRQ_DCD);
1985         return;
1986     }
1987     info->icount.dcd++;
1988 #if SYNCLINK_GENERIC_HDLC
1989     if (info->netcount) {
1990         if (info->signals & SerialSignal_DCD)
1991             netif_carrier_on(info->netdev);
1992         else
1993             netif_carrier_off(info->netdev);
1994     }
1995 #endif
1996     wake_up_interruptible(&info->status_event_wait_q);
1997     wake_up_interruptible(&info->event_wait_q);
1998     info->pending_bh |= BH_STATUS;
1999 
2000     if (tty_port_check_carrier(&info->port)) {
2001         if (info->signals & SerialSignal_DCD)
2002             wake_up_interruptible(&info->port.open_wait);
2003         else {
2004             if (info->port.tty)
2005                 tty_hangup(info->port.tty);
2006         }
2007     }
2008 }
2009 
2010 static void ri_change(struct slgt_info *info, unsigned short status)
2011 {
2012     if (status & BIT0) {
2013         info->signals |= SerialSignal_RI;
2014         info->input_signal_events.ri_up++;
2015     } else {
2016         info->signals &= ~SerialSignal_RI;
2017         info->input_signal_events.ri_down++;
2018     }
2019     DBGISR(("ri_change %s signals=%04X\n", info->device_name, info->signals));
2020     if ((info->ri_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
2021         slgt_irq_off(info, IRQ_RI);
2022         return;
2023     }
2024     info->icount.rng++;
2025     wake_up_interruptible(&info->status_event_wait_q);
2026     wake_up_interruptible(&info->event_wait_q);
2027     info->pending_bh |= BH_STATUS;
2028 }
2029 
2030 static void isr_rxdata(struct slgt_info *info)
2031 {
2032     unsigned int count = info->rbuf_fill_count;
2033     unsigned int i = info->rbuf_fill_index;
2034     unsigned short reg;
2035 
2036     while (rd_reg16(info, SSR) & IRQ_RXDATA) {
2037         reg = rd_reg16(info, RDR);
2038         DBGISR(("isr_rxdata %s RDR=%04X\n", info->device_name, reg));
2039         if (desc_complete(info->rbufs[i])) {
2040             /* all buffers full */
2041             rx_stop(info);
2042             info->rx_restart = true;
2043             continue;
2044         }
2045         info->rbufs[i].buf[count++] = (unsigned char)reg;
2046         /* async mode saves status byte to buffer for each data byte */
2047         if (info->params.mode == MGSL_MODE_ASYNC)
2048             info->rbufs[i].buf[count++] = (unsigned char)(reg >> 8);
2049         if (count == info->rbuf_fill_level || (reg & BIT10)) {
2050             /* buffer full or end of frame */
2051             set_desc_count(info->rbufs[i], count);
2052             set_desc_status(info->rbufs[i], BIT15 | (reg >> 8));
2053             info->rbuf_fill_count = count = 0;
2054             if (++i == info->rbuf_count)
2055                 i = 0;
2056             info->pending_bh |= BH_RECEIVE;
2057         }
2058     }
2059 
2060     info->rbuf_fill_index = i;
2061     info->rbuf_fill_count = count;
2062 }
2063 
2064 static void isr_serial(struct slgt_info *info)
2065 {
2066     unsigned short status = rd_reg16(info, SSR);
2067 
2068     DBGISR(("%s isr_serial status=%04X\n", info->device_name, status));
2069 
2070     wr_reg16(info, SSR, status); /* clear pending */
2071 
2072     info->irq_occurred = true;
2073 
2074     if (info->params.mode == MGSL_MODE_ASYNC) {
2075         if (status & IRQ_TXIDLE) {
2076             if (info->tx_active)
2077                 isr_txeom(info, status);
2078         }
2079         if (info->rx_pio && (status & IRQ_RXDATA))
2080             isr_rxdata(info);
2081         if ((status & IRQ_RXBREAK) && (status & RXBREAK)) {
2082             info->icount.brk++;
2083             /* process break detection if tty control allows */
2084             if (info->port.tty) {
2085                 if (!(status & info->ignore_status_mask)) {
2086                     if (info->read_status_mask & MASK_BREAK) {
2087                         tty_insert_flip_char(&info->port, 0, TTY_BREAK);
2088                         if (info->port.flags & ASYNC_SAK)
2089                             do_SAK(info->port.tty);
2090                     }
2091                 }
2092             }
2093         }
2094     } else {
2095         if (status & (IRQ_TXIDLE + IRQ_TXUNDER))
2096             isr_txeom(info, status);
2097         if (info->rx_pio && (status & IRQ_RXDATA))
2098             isr_rxdata(info);
2099         if (status & IRQ_RXIDLE) {
2100             if (status & RXIDLE)
2101                 info->icount.rxidle++;
2102             else
2103                 info->icount.exithunt++;
2104             wake_up_interruptible(&info->event_wait_q);
2105         }
2106 
2107         if (status & IRQ_RXOVER)
2108             rx_start(info);
2109     }
2110 
2111     if (status & IRQ_DSR)
2112         dsr_change(info, status);
2113     if (status & IRQ_CTS)
2114         cts_change(info, status);
2115     if (status & IRQ_DCD)
2116         dcd_change(info, status);
2117     if (status & IRQ_RI)
2118         ri_change(info, status);
2119 }
2120 
2121 static void isr_rdma(struct slgt_info *info)
2122 {
2123     unsigned int status = rd_reg32(info, RDCSR);
2124 
2125     DBGISR(("%s isr_rdma status=%08x\n", info->device_name, status));
2126 
2127     /* RDCSR (rx DMA control/status)
2128      *
2129      * 31..07  reserved
2130      * 06      save status byte to DMA buffer
2131      * 05      error
2132      * 04      eol (end of list)
2133      * 03      eob (end of buffer)
2134      * 02      IRQ enable
2135      * 01      reset
2136      * 00      enable
2137      */
2138     wr_reg32(info, RDCSR, status);  /* clear pending */
2139 
2140     if (status & (BIT5 + BIT4)) {
2141         DBGISR(("%s isr_rdma rx_restart=1\n", info->device_name));
2142         info->rx_restart = true;
2143     }
2144     info->pending_bh |= BH_RECEIVE;
2145 }
2146 
2147 static void isr_tdma(struct slgt_info *info)
2148 {
2149     unsigned int status = rd_reg32(info, TDCSR);
2150 
2151     DBGISR(("%s isr_tdma status=%08x\n", info->device_name, status));
2152 
2153     /* TDCSR (tx DMA control/status)
2154      *
2155      * 31..06  reserved
2156      * 05      error
2157      * 04      eol (end of list)
2158      * 03      eob (end of buffer)
2159      * 02      IRQ enable
2160      * 01      reset
2161      * 00      enable
2162      */
2163     wr_reg32(info, TDCSR, status);  /* clear pending */
2164 
2165     if (status & (BIT5 + BIT4 + BIT3)) {
2166         // another transmit buffer has completed
2167         // run bottom half to get more send data from user
2168         info->pending_bh |= BH_TRANSMIT;
2169     }
2170 }
2171 
2172 /*
2173  * return true if there are unsent tx DMA buffers, otherwise false
2174  *
2175  * if there are unsent buffers then info->tbuf_start
2176  * is set to index of first unsent buffer
2177  */
2178 static bool unsent_tbufs(struct slgt_info *info)
2179 {
2180     unsigned int i = info->tbuf_current;
2181     bool rc = false;
2182 
2183     /*
2184      * search backwards from last loaded buffer (precedes tbuf_current)
2185      * for first unsent buffer (desc_count > 0)
2186      */
2187 
2188     do {
2189         if (i)
2190             i--;
2191         else
2192             i = info->tbuf_count - 1;
2193         if (!desc_count(info->tbufs[i]))
2194             break;
2195         info->tbuf_start = i;
2196         rc = true;
2197     } while (i != info->tbuf_current);
2198 
2199     return rc;
2200 }
2201 
2202 static void isr_txeom(struct slgt_info *info, unsigned short status)
2203 {
2204     DBGISR(("%s txeom status=%04x\n", info->device_name, status));
2205 
2206     slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
2207     tdma_reset(info);
2208     if (status & IRQ_TXUNDER) {
2209         unsigned short val = rd_reg16(info, TCR);
2210         wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
2211         wr_reg16(info, TCR, val); /* clear reset bit */
2212     }
2213 
2214     if (info->tx_active) {
2215         if (info->params.mode != MGSL_MODE_ASYNC) {
2216             if (status & IRQ_TXUNDER)
2217                 info->icount.txunder++;
2218             else if (status & IRQ_TXIDLE)
2219                 info->icount.txok++;
2220         }
2221 
2222         if (unsent_tbufs(info)) {
2223             tx_start(info);
2224             update_tx_timer(info);
2225             return;
2226         }
2227         info->tx_active = false;
2228 
2229         del_timer(&info->tx_timer);
2230 
2231         if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
2232             info->signals &= ~SerialSignal_RTS;
2233             info->drop_rts_on_tx_done = false;
2234             set_gtsignals(info);
2235         }
2236 
2237 #if SYNCLINK_GENERIC_HDLC
2238         if (info->netcount)
2239             hdlcdev_tx_done(info);
2240         else
2241 #endif
2242         {
2243             if (info->port.tty && (info->port.tty->flow.stopped || info->port.tty->hw_stopped)) {
2244                 tx_stop(info);
2245                 return;
2246             }
2247             info->pending_bh |= BH_TRANSMIT;
2248         }
2249     }
2250 }
2251 
2252 static void isr_gpio(struct slgt_info *info, unsigned int changed, unsigned int state)
2253 {
2254     struct cond_wait *w, *prev;
2255 
2256     /* wake processes waiting for specific transitions */
2257     for (w = info->gpio_wait_q, prev = NULL ; w != NULL ; w = w->next) {
2258         if (w->data & changed) {
2259             w->data = state;
2260             wake_up_interruptible(&w->q);
2261             if (prev != NULL)
2262                 prev->next = w->next;
2263             else
2264                 info->gpio_wait_q = w->next;
2265         } else
2266             prev = w;
2267     }
2268 }
2269 
2270 /* interrupt service routine
2271  *
2272  *  irq interrupt number
2273  *  dev_id  device ID supplied during interrupt registration
2274  */
2275 static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
2276 {
2277     struct slgt_info *info = dev_id;
2278     unsigned int gsr;
2279     unsigned int i;
2280 
2281     DBGISR(("slgt_interrupt irq=%d entry\n", info->irq_level));
2282 
2283     while((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
2284         DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
2285         info->irq_occurred = true;
2286         for(i=0; i < info->port_count ; i++) {
2287             if (info->port_array[i] == NULL)
2288                 continue;
2289             spin_lock(&info->port_array[i]->lock);
2290             if (gsr & (BIT8 << i))
2291                 isr_serial(info->port_array[i]);
2292             if (gsr & (BIT16 << (i*2)))
2293                 isr_rdma(info->port_array[i]);
2294             if (gsr & (BIT17 << (i*2)))
2295                 isr_tdma(info->port_array[i]);
2296             spin_unlock(&info->port_array[i]->lock);
2297         }
2298     }
2299 
2300     if (info->gpio_present) {
2301         unsigned int state;
2302         unsigned int changed;
2303         spin_lock(&info->lock);
2304         while ((changed = rd_reg32(info, IOSR)) != 0) {
2305             DBGISR(("%s iosr=%08x\n", info->device_name, changed));
2306             /* read latched state of GPIO signals */
2307             state = rd_reg32(info, IOVR);
2308             /* clear pending GPIO interrupt bits */
2309             wr_reg32(info, IOSR, changed);
2310             for (i=0 ; i < info->port_count ; i++) {
2311                 if (info->port_array[i] != NULL)
2312                     isr_gpio(info->port_array[i], changed, state);
2313             }
2314         }
2315         spin_unlock(&info->lock);
2316     }
2317 
2318     for(i=0; i < info->port_count ; i++) {
2319         struct slgt_info *port = info->port_array[i];
2320         if (port == NULL)
2321             continue;
2322         spin_lock(&port->lock);
2323         if ((port->port.count || port->netcount) &&
2324             port->pending_bh && !port->bh_running &&
2325             !port->bh_requested) {
2326             DBGISR(("%s bh queued\n", port->device_name));
2327             schedule_work(&port->task);
2328             port->bh_requested = true;
2329         }
2330         spin_unlock(&port->lock);
2331     }
2332 
2333     DBGISR(("slgt_interrupt irq=%d exit\n", info->irq_level));
2334     return IRQ_HANDLED;
2335 }
2336 
2337 static int startup(struct slgt_info *info)
2338 {
2339     DBGINFO(("%s startup\n", info->device_name));
2340 
2341     if (tty_port_initialized(&info->port))
2342         return 0;
2343 
2344     if (!info->tx_buf) {
2345         info->tx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
2346         if (!info->tx_buf) {
2347             DBGERR(("%s can't allocate tx buffer\n", info->device_name));
2348             return -ENOMEM;
2349         }
2350     }
2351 
2352     info->pending_bh = 0;
2353 
2354     memset(&info->icount, 0, sizeof(info->icount));
2355 
2356     /* program hardware for current parameters */
2357     change_params(info);
2358 
2359     if (info->port.tty)
2360         clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
2361 
2362     tty_port_set_initialized(&info->port, 1);
2363 
2364     return 0;
2365 }
2366 
2367 /*
2368  *  called by close() and hangup() to shutdown hardware
2369  */
2370 static void shutdown(struct slgt_info *info)
2371 {
2372     unsigned long flags;
2373 
2374     if (!tty_port_initialized(&info->port))
2375         return;
2376 
2377     DBGINFO(("%s shutdown\n", info->device_name));
2378 
2379     /* clear status wait queue because status changes */
2380     /* can't happen after shutting down the hardware */
2381     wake_up_interruptible(&info->status_event_wait_q);
2382     wake_up_interruptible(&info->event_wait_q);
2383 
2384     del_timer_sync(&info->tx_timer);
2385     del_timer_sync(&info->rx_timer);
2386 
2387     kfree(info->tx_buf);
2388     info->tx_buf = NULL;
2389 
2390     spin_lock_irqsave(&info->lock,flags);
2391 
2392     tx_stop(info);
2393     rx_stop(info);
2394 
2395     slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
2396 
2397     if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
2398         info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
2399         set_gtsignals(info);
2400     }
2401 
2402     flush_cond_wait(&info->gpio_wait_q);
2403 
2404     spin_unlock_irqrestore(&info->lock,flags);
2405 
2406     if (info->port.tty)
2407         set_bit(TTY_IO_ERROR, &info->port.tty->flags);
2408 
2409     tty_port_set_initialized(&info->port, 0);
2410 }
2411 
2412 static void program_hw(struct slgt_info *info)
2413 {
2414     unsigned long flags;
2415 
2416     spin_lock_irqsave(&info->lock,flags);
2417 
2418     rx_stop(info);
2419     tx_stop(info);
2420 
2421     if (info->params.mode != MGSL_MODE_ASYNC ||
2422         info->netcount)
2423         sync_mode(info);
2424     else
2425         async_mode(info);
2426 
2427     set_gtsignals(info);
2428 
2429     info->dcd_chkcount = 0;
2430     info->cts_chkcount = 0;
2431     info->ri_chkcount = 0;
2432     info->dsr_chkcount = 0;
2433 
2434     slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
2435     get_gtsignals(info);
2436 
2437     if (info->netcount ||
2438         (info->port.tty && info->port.tty->termios.c_cflag & CREAD))
2439         rx_start(info);
2440 
2441     spin_unlock_irqrestore(&info->lock,flags);
2442 }
2443 
2444 /*
2445  * reconfigure adapter based on new parameters
2446  */
2447 static void change_params(struct slgt_info *info)
2448 {
2449     unsigned cflag;
2450     int bits_per_char;
2451 
2452     if (!info->port.tty)
2453         return;
2454     DBGINFO(("%s change_params\n", info->device_name));
2455 
2456     cflag = info->port.tty->termios.c_cflag;
2457 
2458     /* if B0 rate (hangup) specified then negate RTS and DTR */
2459     /* otherwise assert RTS and DTR */
2460     if (cflag & CBAUD)
2461         info->signals |= SerialSignal_RTS | SerialSignal_DTR;
2462     else
2463         info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
2464 
2465     /* byte size and parity */
2466 
2467     info->params.data_bits = tty_get_char_size(cflag);
2468     info->params.stop_bits = (cflag & CSTOPB) ? 2 : 1;
2469 
2470     if (cflag & PARENB)
2471         info->params.parity = (cflag & PARODD) ? ASYNC_PARITY_ODD : ASYNC_PARITY_EVEN;
2472     else
2473         info->params.parity = ASYNC_PARITY_NONE;
2474 
2475     /* calculate number of jiffies to transmit a full
2476      * FIFO (32 bytes) at specified data rate
2477      */
2478     bits_per_char = info->params.data_bits +
2479             info->params.stop_bits + 1;
2480 
2481     info->params.data_rate = tty_get_baud_rate(info->port.tty);
2482 
2483     if (info->params.data_rate) {
2484         info->timeout = (32*HZ*bits_per_char) /
2485                 info->params.data_rate;
2486     }
2487     info->timeout += HZ/50;     /* Add .02 seconds of slop */
2488 
2489     tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
2490     tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
2491 
2492     /* process tty input control flags */
2493 
2494     info->read_status_mask = IRQ_RXOVER;
2495     if (I_INPCK(info->port.tty))
2496         info->read_status_mask |= MASK_PARITY | MASK_FRAMING;
2497     if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
2498         info->read_status_mask |= MASK_BREAK;
2499     if (I_IGNPAR(info->port.tty))
2500         info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING;
2501     if (I_IGNBRK(info->port.tty)) {
2502         info->ignore_status_mask |= MASK_BREAK;
2503         /* If ignoring parity and break indicators, ignore
2504          * overruns too.  (For real raw support).
2505          */
2506         if (I_IGNPAR(info->port.tty))
2507             info->ignore_status_mask |= MASK_OVERRUN;
2508     }
2509 
2510     program_hw(info);
2511 }
2512 
2513 static int get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount)
2514 {
2515     DBGINFO(("%s get_stats\n",  info->device_name));
2516     if (!user_icount) {
2517         memset(&info->icount, 0, sizeof(info->icount));
2518     } else {
2519         if (copy_to_user(user_icount, &info->icount, sizeof(struct mgsl_icount)))
2520             return -EFAULT;
2521     }
2522     return 0;
2523 }
2524 
2525 static int get_params(struct slgt_info *info, MGSL_PARAMS __user *user_params)
2526 {
2527     DBGINFO(("%s get_params\n", info->device_name));
2528     if (copy_to_user(user_params, &info->params, sizeof(MGSL_PARAMS)))
2529         return -EFAULT;
2530     return 0;
2531 }
2532 
2533 static int set_params(struct slgt_info *info, MGSL_PARAMS __user *new_params)
2534 {
2535     unsigned long flags;
2536     MGSL_PARAMS tmp_params;
2537 
2538     DBGINFO(("%s set_params\n", info->device_name));
2539     if (copy_from_user(&tmp_params, new_params, sizeof(MGSL_PARAMS)))
2540         return -EFAULT;
2541 
2542     spin_lock_irqsave(&info->lock, flags);
2543     if (tmp_params.mode == MGSL_MODE_BASE_CLOCK)
2544         info->base_clock = tmp_params.clock_speed;
2545     else
2546         memcpy(&info->params, &tmp_params, sizeof(MGSL_PARAMS));
2547     spin_unlock_irqrestore(&info->lock, flags);
2548 
2549     program_hw(info);
2550 
2551     return 0;
2552 }
2553 
2554 static int get_txidle(struct slgt_info *info, int __user *idle_mode)
2555 {
2556     DBGINFO(("%s get_txidle=%d\n", info->device_name, info->idle_mode));
2557     if (put_user(info->idle_mode, idle_mode))
2558         return -EFAULT;
2559     return 0;
2560 }
2561 
2562 static int set_txidle(struct slgt_info *info, int idle_mode)
2563 {
2564     unsigned long flags;
2565     DBGINFO(("%s set_txidle(%d)\n", info->device_name, idle_mode));
2566     spin_lock_irqsave(&info->lock,flags);
2567     info->idle_mode = idle_mode;
2568     if (info->params.mode != MGSL_MODE_ASYNC)
2569         tx_set_idle(info);
2570     spin_unlock_irqrestore(&info->lock,flags);
2571     return 0;
2572 }
2573 
2574 static int tx_enable(struct slgt_info *info, int enable)
2575 {
2576     unsigned long flags;
2577     DBGINFO(("%s tx_enable(%d)\n", info->device_name, enable));
2578     spin_lock_irqsave(&info->lock,flags);
2579     if (enable) {
2580         if (!info->tx_enabled)
2581             tx_start(info);
2582     } else {
2583         if (info->tx_enabled)
2584             tx_stop(info);
2585     }
2586     spin_unlock_irqrestore(&info->lock,flags);
2587     return 0;
2588 }
2589 
2590 /*
2591  * abort transmit HDLC frame
2592  */
2593 static int tx_abort(struct slgt_info *info)
2594 {
2595     unsigned long flags;
2596     DBGINFO(("%s tx_abort\n", info->device_name));
2597     spin_lock_irqsave(&info->lock,flags);
2598     tdma_reset(info);
2599     spin_unlock_irqrestore(&info->lock,flags);
2600     return 0;
2601 }
2602 
2603 static int rx_enable(struct slgt_info *info, int enable)
2604 {
2605     unsigned long flags;
2606     unsigned int rbuf_fill_level;
2607     DBGINFO(("%s rx_enable(%08x)\n", info->device_name, enable));
2608     spin_lock_irqsave(&info->lock,flags);
2609     /*
2610      * enable[31..16] = receive DMA buffer fill level
2611      * 0 = noop (leave fill level unchanged)
2612      * fill level must be multiple of 4 and <= buffer size
2613      */
2614     rbuf_fill_level = ((unsigned int)enable) >> 16;
2615     if (rbuf_fill_level) {
2616         if ((rbuf_fill_level > DMABUFSIZE) || (rbuf_fill_level % 4)) {
2617             spin_unlock_irqrestore(&info->lock, flags);
2618             return -EINVAL;
2619         }
2620         info->rbuf_fill_level = rbuf_fill_level;
2621         if (rbuf_fill_level < 128)
2622             info->rx_pio = 1; /* PIO mode */
2623         else
2624             info->rx_pio = 0; /* DMA mode */
2625         rx_stop(info); /* restart receiver to use new fill level */
2626     }
2627 
2628     /*
2629      * enable[1..0] = receiver enable command
2630      * 0 = disable
2631      * 1 = enable
2632      * 2 = enable or force hunt mode if already enabled
2633      */
2634     enable &= 3;
2635     if (enable) {
2636         if (!info->rx_enabled)
2637             rx_start(info);
2638         else if (enable == 2) {
2639             /* force hunt mode (write 1 to RCR[3]) */
2640             wr_reg16(info, RCR, rd_reg16(info, RCR) | BIT3);
2641         }
2642     } else {
2643         if (info->rx_enabled)
2644             rx_stop(info);
2645     }
2646     spin_unlock_irqrestore(&info->lock,flags);
2647     return 0;
2648 }
2649 
2650 /*
2651  *  wait for specified event to occur
2652  */
2653 static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr)
2654 {
2655     unsigned long flags;
2656     int s;
2657     int rc=0;
2658     struct mgsl_icount cprev, cnow;
2659     int events;
2660     int mask;
2661     struct  _input_signal_events oldsigs, newsigs;
2662     DECLARE_WAITQUEUE(wait, current);
2663 
2664     if (get_user(mask, mask_ptr))
2665         return -EFAULT;
2666 
2667     DBGINFO(("%s wait_mgsl_event(%d)\n", info->device_name, mask));
2668 
2669     spin_lock_irqsave(&info->lock,flags);
2670 
2671     /* return immediately if state matches requested events */
2672     get_gtsignals(info);
2673     s = info->signals;
2674 
2675     events = mask &
2676         ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2677           ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2678           ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2679           ((s & SerialSignal_RI)  ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2680     if (events) {
2681         spin_unlock_irqrestore(&info->lock,flags);
2682         goto exit;
2683     }
2684 
2685     /* save current irq counts */
2686     cprev = info->icount;
2687     oldsigs = info->input_signal_events;
2688 
2689     /* enable hunt and idle irqs if needed */
2690     if (mask & (MgslEvent_ExitHuntMode+MgslEvent_IdleReceived)) {
2691         unsigned short val = rd_reg16(info, SCR);
2692         if (!(val & IRQ_RXIDLE))
2693             wr_reg16(info, SCR, (unsigned short)(val | IRQ_RXIDLE));
2694     }
2695 
2696     set_current_state(TASK_INTERRUPTIBLE);
2697     add_wait_queue(&info->event_wait_q, &wait);
2698 
2699     spin_unlock_irqrestore(&info->lock,flags);
2700 
2701     for(;;) {
2702         schedule();
2703         if (signal_pending(current)) {
2704             rc = -ERESTARTSYS;
2705             break;
2706         }
2707 
2708         /* get current irq counts */
2709         spin_lock_irqsave(&info->lock,flags);
2710         cnow = info->icount;
2711         newsigs = info->input_signal_events;
2712         set_current_state(TASK_INTERRUPTIBLE);
2713         spin_unlock_irqrestore(&info->lock,flags);
2714 
2715         /* if no change, wait aborted for some reason */
2716         if (newsigs.dsr_up   == oldsigs.dsr_up   &&
2717             newsigs.dsr_down == oldsigs.dsr_down &&
2718             newsigs.dcd_up   == oldsigs.dcd_up   &&
2719             newsigs.dcd_down == oldsigs.dcd_down &&
2720             newsigs.cts_up   == oldsigs.cts_up   &&
2721             newsigs.cts_down == oldsigs.cts_down &&
2722             newsigs.ri_up    == oldsigs.ri_up    &&
2723             newsigs.ri_down  == oldsigs.ri_down  &&
2724             cnow.exithunt    == cprev.exithunt   &&
2725             cnow.rxidle      == cprev.rxidle) {
2726             rc = -EIO;
2727             break;
2728         }
2729 
2730         events = mask &
2731             ( (newsigs.dsr_up   != oldsigs.dsr_up   ? MgslEvent_DsrActive:0)   +
2732               (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2733               (newsigs.dcd_up   != oldsigs.dcd_up   ? MgslEvent_DcdActive:0)   +
2734               (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2735               (newsigs.cts_up   != oldsigs.cts_up   ? MgslEvent_CtsActive:0)   +
2736               (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2737               (newsigs.ri_up    != oldsigs.ri_up    ? MgslEvent_RiActive:0)    +
2738               (newsigs.ri_down  != oldsigs.ri_down  ? MgslEvent_RiInactive:0)  +
2739               (cnow.exithunt    != cprev.exithunt   ? MgslEvent_ExitHuntMode:0) +
2740               (cnow.rxidle      != cprev.rxidle     ? MgslEvent_IdleReceived:0) );
2741         if (events)
2742             break;
2743 
2744         cprev = cnow;
2745         oldsigs = newsigs;
2746     }
2747 
2748     remove_wait_queue(&info->event_wait_q, &wait);
2749     set_current_state(TASK_RUNNING);
2750 
2751 
2752     if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2753         spin_lock_irqsave(&info->lock,flags);
2754         if (!waitqueue_active(&info->event_wait_q)) {
2755             /* disable enable exit hunt mode/idle rcvd IRQs */
2756             wr_reg16(info, SCR,
2757                 (unsigned short)(rd_reg16(info, SCR) & ~IRQ_RXIDLE));
2758         }
2759         spin_unlock_irqrestore(&info->lock,flags);
2760     }
2761 exit:
2762     if (rc == 0)
2763         rc = put_user(events, mask_ptr);
2764     return rc;
2765 }
2766 
2767 static int get_interface(struct slgt_info *info, int __user *if_mode)
2768 {
2769     DBGINFO(("%s get_interface=%x\n", info->device_name, info->if_mode));
2770     if (put_user(info->if_mode, if_mode))
2771         return -EFAULT;
2772     return 0;
2773 }
2774 
2775 static int set_interface(struct slgt_info *info, int if_mode)
2776 {
2777     unsigned long flags;
2778     unsigned short val;
2779 
2780     DBGINFO(("%s set_interface=%x)\n", info->device_name, if_mode));
2781     spin_lock_irqsave(&info->lock,flags);
2782     info->if_mode = if_mode;
2783 
2784     msc_set_vcr(info);
2785 
2786     /* TCR (tx control) 07  1=RTS driver control */
2787     val = rd_reg16(info, TCR);
2788     if (info->if_mode & MGSL_INTERFACE_RTS_EN)
2789         val |= BIT7;
2790     else
2791         val &= ~BIT7;
2792     wr_reg16(info, TCR, val);
2793 
2794     spin_unlock_irqrestore(&info->lock,flags);
2795     return 0;
2796 }
2797 
2798 static int get_xsync(struct slgt_info *info, int __user *xsync)
2799 {
2800     DBGINFO(("%s get_xsync=%x\n", info->device_name, info->xsync));
2801     if (put_user(info->xsync, xsync))
2802         return -EFAULT;
2803     return 0;
2804 }
2805 
2806 /*
2807  * set extended sync pattern (1 to 4 bytes) for extended sync mode
2808  *
2809  * sync pattern is contained in least significant bytes of value
2810  * most significant byte of sync pattern is oldest (1st sent/detected)
2811  */
2812 static int set_xsync(struct slgt_info *info, int xsync)
2813 {
2814     unsigned long flags;
2815 
2816     DBGINFO(("%s set_xsync=%x)\n", info->device_name, xsync));
2817     spin_lock_irqsave(&info->lock, flags);
2818     info->xsync = xsync;
2819     wr_reg32(info, XSR, xsync);
2820     spin_unlock_irqrestore(&info->lock, flags);
2821     return 0;
2822 }
2823 
2824 static int get_xctrl(struct slgt_info *info, int __user *xctrl)
2825 {
2826     DBGINFO(("%s get_xctrl=%x\n", info->device_name, info->xctrl));
2827     if (put_user(info->xctrl, xctrl))
2828         return -EFAULT;
2829     return 0;
2830 }
2831 
2832 /*
2833  * set extended control options
2834  *
2835  * xctrl[31:19] reserved, must be zero
2836  * xctrl[18:17] extended sync pattern length in bytes
2837  *              00 = 1 byte  in xsr[7:0]
2838  *              01 = 2 bytes in xsr[15:0]
2839  *              10 = 3 bytes in xsr[23:0]
2840  *              11 = 4 bytes in xsr[31:0]
2841  * xctrl[16]    1 = enable terminal count, 0=disabled
2842  * xctrl[15:0]  receive terminal count for fixed length packets
2843  *              value is count minus one (0 = 1 byte packet)
2844  *              when terminal count is reached, receiver
2845  *              automatically returns to hunt mode and receive
2846  *              FIFO contents are flushed to DMA buffers with
2847  *              end of frame (EOF) status
2848  */
2849 static int set_xctrl(struct slgt_info *info, int xctrl)
2850 {
2851     unsigned long flags;
2852 
2853     DBGINFO(("%s set_xctrl=%x)\n", info->device_name, xctrl));
2854     spin_lock_irqsave(&info->lock, flags);
2855     info->xctrl = xctrl;
2856     wr_reg32(info, XCR, xctrl);
2857     spin_unlock_irqrestore(&info->lock, flags);
2858     return 0;
2859 }
2860 
2861 /*
2862  * set general purpose IO pin state and direction
2863  *
2864  * user_gpio fields:
2865  * state   each bit indicates a pin state
2866  * smask   set bit indicates pin state to set
2867  * dir     each bit indicates a pin direction (0=input, 1=output)
2868  * dmask   set bit indicates pin direction to set
2869  */
2870 static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
2871 {
2872     unsigned long flags;
2873     struct gpio_desc gpio;
2874     __u32 data;
2875 
2876     if (!info->gpio_present)
2877         return -EINVAL;
2878     if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
2879         return -EFAULT;
2880     DBGINFO(("%s set_gpio state=%08x smask=%08x dir=%08x dmask=%08x\n",
2881          info->device_name, gpio.state, gpio.smask,
2882          gpio.dir, gpio.dmask));
2883 
2884     spin_lock_irqsave(&info->port_array[0]->lock, flags);
2885     if (gpio.dmask) {
2886         data = rd_reg32(info, IODR);
2887         data |= gpio.dmask & gpio.dir;
2888         data &= ~(gpio.dmask & ~gpio.dir);
2889         wr_reg32(info, IODR, data);
2890     }
2891     if (gpio.smask) {
2892         data = rd_reg32(info, IOVR);
2893         data |= gpio.smask & gpio.state;
2894         data &= ~(gpio.smask & ~gpio.state);
2895         wr_reg32(info, IOVR, data);
2896     }
2897     spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
2898 
2899     return 0;
2900 }
2901 
2902 /*
2903  * get general purpose IO pin state and direction
2904  */
2905 static int get_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
2906 {
2907     struct gpio_desc gpio;
2908     if (!info->gpio_present)
2909         return -EINVAL;
2910     gpio.state = rd_reg32(info, IOVR);
2911     gpio.smask = 0xffffffff;
2912     gpio.dir   = rd_reg32(info, IODR);
2913     gpio.dmask = 0xffffffff;
2914     if (copy_to_user(user_gpio, &gpio, sizeof(gpio)))
2915         return -EFAULT;
2916     DBGINFO(("%s get_gpio state=%08x dir=%08x\n",
2917          info->device_name, gpio.state, gpio.dir));
2918     return 0;
2919 }
2920 
2921 /*
2922  * conditional wait facility
2923  */
2924 static void init_cond_wait(struct cond_wait *w, unsigned int data)
2925 {
2926     init_waitqueue_head(&w->q);
2927     init_waitqueue_entry(&w->wait, current);
2928     w->data = data;
2929 }
2930 
2931 static void add_cond_wait(struct cond_wait **head, struct cond_wait *w)
2932 {
2933     set_current_state(TASK_INTERRUPTIBLE);
2934     add_wait_queue(&w->q, &w->wait);
2935     w->next = *head;
2936     *head = w;
2937 }
2938 
2939 static void remove_cond_wait(struct cond_wait **head, struct cond_wait *cw)
2940 {
2941     struct cond_wait *w, *prev;
2942     remove_wait_queue(&cw->q, &cw->wait);
2943     set_current_state(TASK_RUNNING);
2944     for (w = *head, prev = NULL ; w != NULL ; prev = w, w = w->next) {
2945         if (w == cw) {
2946             if (prev != NULL)
2947                 prev->next = w->next;
2948             else
2949                 *head = w->next;
2950             break;
2951         }
2952     }
2953 }
2954 
2955 static void flush_cond_wait(struct cond_wait **head)
2956 {
2957     while (*head != NULL) {
2958         wake_up_interruptible(&(*head)->q);
2959         *head = (*head)->next;
2960     }
2961 }
2962 
2963 /*
2964  * wait for general purpose I/O pin(s) to enter specified state
2965  *
2966  * user_gpio fields:
2967  * state - bit indicates target pin state
2968  * smask - set bit indicates watched pin
2969  *
2970  * The wait ends when at least one watched pin enters the specified
2971  * state. When 0 (no error) is returned, user_gpio->state is set to the
2972  * state of all GPIO pins when the wait ends.
2973  *
2974  * Note: Each pin may be a dedicated input, dedicated output, or
2975  * configurable input/output. The number and configuration of pins
2976  * varies with the specific adapter model. Only input pins (dedicated
2977  * or configured) can be monitored with this function.
2978  */
2979 static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
2980 {
2981     unsigned long flags;
2982     int rc = 0;
2983     struct gpio_desc gpio;
2984     struct cond_wait wait;
2985     u32 state;
2986 
2987     if (!info->gpio_present)
2988         return -EINVAL;
2989     if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
2990         return -EFAULT;
2991     DBGINFO(("%s wait_gpio() state=%08x smask=%08x\n",
2992          info->device_name, gpio.state, gpio.smask));
2993     /* ignore output pins identified by set IODR bit */
2994     if ((gpio.smask &= ~rd_reg32(info, IODR)) == 0)
2995         return -EINVAL;
2996     init_cond_wait(&wait, gpio.smask);
2997 
2998     spin_lock_irqsave(&info->port_array[0]->lock, flags);
2999     /* enable interrupts for watched pins */
3000     wr_reg32(info, IOER, rd_reg32(info, IOER) | gpio.smask);
3001     /* get current pin states */
3002     state = rd_reg32(info, IOVR);
3003 
3004     if (gpio.smask & ~(state ^ gpio.state)) {
3005         /* already in target state */
3006         gpio.state = state;
3007     } else {
3008         /* wait for target state */
3009         add_cond_wait(&info->gpio_wait_q, &wait);
3010         spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
3011         schedule();
3012         if (signal_pending(current))
3013             rc = -ERESTARTSYS;
3014         else
3015             gpio.state = wait.data;
3016         spin_lock_irqsave(&info->port_array[0]->lock, flags);
3017         remove_cond_wait(&info->gpio_wait_q, &wait);
3018     }
3019 
3020     /* disable all GPIO interrupts if no waiting processes */
3021     if (info->gpio_wait_q == NULL)
3022         wr_reg32(info, IOER, 0);
3023     spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
3024 
3025     if ((rc == 0) && copy_to_user(user_gpio, &gpio, sizeof(gpio)))
3026         rc = -EFAULT;
3027     return rc;
3028 }
3029 
3030 static int modem_input_wait(struct slgt_info *info,int arg)
3031 {
3032     unsigned long flags;
3033     int rc;
3034     struct mgsl_icount cprev, cnow;
3035     DECLARE_WAITQUEUE(wait, current);
3036 
3037     /* save current irq counts */
3038     spin_lock_irqsave(&info->lock,flags);
3039     cprev = info->icount;
3040     add_wait_queue(&info->status_event_wait_q, &wait);
3041     set_current_state(TASK_INTERRUPTIBLE);
3042     spin_unlock_irqrestore(&info->lock,flags);
3043 
3044     for(;;) {
3045         schedule();
3046         if (signal_pending(current)) {
3047             rc = -ERESTARTSYS;
3048             break;
3049         }
3050 
3051         /* get new irq counts */
3052         spin_lock_irqsave(&info->lock,flags);
3053         cnow = info->icount;
3054         set_current_state(TASK_INTERRUPTIBLE);
3055         spin_unlock_irqrestore(&info->lock,flags);
3056 
3057         /* if no change, wait aborted for some reason */
3058         if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
3059             cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
3060             rc = -EIO;
3061             break;
3062         }
3063 
3064         /* check for change in caller specified modem input */
3065         if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
3066             (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
3067             (arg & TIOCM_CD  && cnow.dcd != cprev.dcd) ||
3068             (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
3069             rc = 0;
3070             break;
3071         }
3072 
3073         cprev = cnow;
3074     }
3075     remove_wait_queue(&info->status_event_wait_q, &wait);
3076     set_current_state(TASK_RUNNING);
3077     return rc;
3078 }
3079 
3080 /*
3081  *  return state of serial control and status signals
3082  */
3083 static int tiocmget(struct tty_struct *tty)
3084 {
3085     struct slgt_info *info = tty->driver_data;
3086     unsigned int result;
3087     unsigned long flags;
3088 
3089     spin_lock_irqsave(&info->lock,flags);
3090     get_gtsignals(info);
3091     spin_unlock_irqrestore(&info->lock,flags);
3092 
3093     result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
3094         ((info->signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
3095         ((info->signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
3096         ((info->signals & SerialSignal_RI)  ? TIOCM_RNG:0) +
3097         ((info->signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
3098         ((info->signals & SerialSignal_CTS) ? TIOCM_CTS:0);
3099 
3100     DBGINFO(("%s tiocmget value=%08X\n", info->device_name, result));
3101     return result;
3102 }
3103 
3104 /*
3105  * set modem control signals (DTR/RTS)
3106  *
3107  *  cmd signal command: TIOCMBIS = set bit TIOCMBIC = clear bit
3108  *      TIOCMSET = set/clear signal values
3109  *  value   bit mask for command
3110  */
3111 static int tiocmset(struct tty_struct *tty,
3112             unsigned int set, unsigned int clear)
3113 {
3114     struct slgt_info *info = tty->driver_data;
3115     unsigned long flags;
3116 
3117     DBGINFO(("%s tiocmset(%x,%x)\n", info->device_name, set, clear));
3118 
3119     if (set & TIOCM_RTS)
3120         info->signals |= SerialSignal_RTS;
3121     if (set & TIOCM_DTR)
3122         info->signals |= SerialSignal_DTR;
3123     if (clear & TIOCM_RTS)
3124         info->signals &= ~SerialSignal_RTS;
3125     if (clear & TIOCM_DTR)
3126         info->signals &= ~SerialSignal_DTR;
3127 
3128     spin_lock_irqsave(&info->lock,flags);
3129     set_gtsignals(info);
3130     spin_unlock_irqrestore(&info->lock,flags);
3131     return 0;
3132 }
3133 
3134 static int carrier_raised(struct tty_port *port)
3135 {
3136     unsigned long flags;
3137     struct slgt_info *info = container_of(port, struct slgt_info, port);
3138 
3139     spin_lock_irqsave(&info->lock,flags);
3140     get_gtsignals(info);
3141     spin_unlock_irqrestore(&info->lock,flags);
3142     return (info->signals & SerialSignal_DCD) ? 1 : 0;
3143 }
3144 
3145 static void dtr_rts(struct tty_port *port, int on)
3146 {
3147     unsigned long flags;
3148     struct slgt_info *info = container_of(port, struct slgt_info, port);
3149 
3150     spin_lock_irqsave(&info->lock,flags);
3151     if (on)
3152         info->signals |= SerialSignal_RTS | SerialSignal_DTR;
3153     else
3154         info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3155     set_gtsignals(info);
3156     spin_unlock_irqrestore(&info->lock,flags);
3157 }
3158 
3159 
3160 /*
3161  *  block current process until the device is ready to open
3162  */
3163 static int block_til_ready(struct tty_struct *tty, struct file *filp,
3164                struct slgt_info *info)
3165 {
3166     DECLARE_WAITQUEUE(wait, current);
3167     int     retval;
3168     bool        do_clocal = false;
3169     unsigned long   flags;
3170     int     cd;
3171     struct tty_port *port = &info->port;
3172 
3173     DBGINFO(("%s block_til_ready\n", tty->driver->name));
3174 
3175     if (filp->f_flags & O_NONBLOCK || tty_io_error(tty)) {
3176         /* nonblock mode is set or port is not enabled */
3177         tty_port_set_active(port, 1);
3178         return 0;
3179     }
3180 
3181     if (C_CLOCAL(tty))
3182         do_clocal = true;
3183 
3184     /* Wait for carrier detect and the line to become
3185      * free (i.e., not in use by the callout).  While we are in
3186      * this loop, port->count is dropped by one, so that
3187      * close() knows when to free things.  We restore it upon
3188      * exit, either normal or abnormal.
3189      */
3190 
3191     retval = 0;
3192     add_wait_queue(&port->open_wait, &wait);
3193 
3194     spin_lock_irqsave(&info->lock, flags);
3195     port->count--;
3196     spin_unlock_irqrestore(&info->lock, flags);
3197     port->blocked_open++;
3198 
3199     while (1) {
3200         if (C_BAUD(tty) && tty_port_initialized(port))
3201             tty_port_raise_dtr_rts(port);
3202 
3203         set_current_state(TASK_INTERRUPTIBLE);
3204 
3205         if (tty_hung_up_p(filp) || !tty_port_initialized(port)) {
3206             retval = (port->flags & ASYNC_HUP_NOTIFY) ?
3207                     -EAGAIN : -ERESTARTSYS;
3208             break;
3209         }
3210 
3211         cd = tty_port_carrier_raised(port);
3212         if (do_clocal || cd)
3213             break;
3214 
3215         if (signal_pending(current)) {
3216             retval = -ERESTARTSYS;
3217             break;
3218         }
3219 
3220         DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
3221         tty_unlock(tty);
3222         schedule();
3223         tty_lock(tty);
3224     }
3225 
3226     set_current_state(TASK_RUNNING);
3227     remove_wait_queue(&port->open_wait, &wait);
3228 
3229     if (!tty_hung_up_p(filp))
3230         port->count++;
3231     port->blocked_open--;
3232 
3233     if (!retval)
3234         tty_port_set_active(port, 1);
3235 
3236     DBGINFO(("%s block_til_ready ready, rc=%d\n", tty->driver->name, retval));
3237     return retval;
3238 }
3239 
3240 /*
3241  * allocate buffers used for calling line discipline receive_buf
3242  * directly in synchronous mode
3243  * note: add 5 bytes to max frame size to allow appending
3244  * 32-bit CRC and status byte when configured to do so
3245  */
3246 static int alloc_tmp_rbuf(struct slgt_info *info)
3247 {
3248     info->tmp_rbuf = kmalloc(info->max_frame_size + 5, GFP_KERNEL);
3249     if (info->tmp_rbuf == NULL)
3250         return -ENOMEM;
3251     /* unused flag buffer to satisfy receive_buf calling interface */
3252     info->flag_buf = kzalloc(info->max_frame_size + 5, GFP_KERNEL);
3253     if (!info->flag_buf) {
3254         kfree(info->tmp_rbuf);
3255         info->tmp_rbuf = NULL;
3256         return -ENOMEM;
3257     }
3258     return 0;
3259 }
3260 
3261 static void free_tmp_rbuf(struct slgt_info *info)
3262 {
3263     kfree(info->tmp_rbuf);
3264     info->tmp_rbuf = NULL;
3265     kfree(info->flag_buf);
3266     info->flag_buf = NULL;
3267 }
3268 
3269 /*
3270  * allocate DMA descriptor lists.
3271  */
3272 static int alloc_desc(struct slgt_info *info)
3273 {
3274     unsigned int i;
3275     unsigned int pbufs;
3276 
3277     /* allocate memory to hold descriptor lists */
3278     info->bufs = dma_alloc_coherent(&info->pdev->dev, DESC_LIST_SIZE,
3279                     &info->bufs_dma_addr, GFP_KERNEL);
3280     if (info->bufs == NULL)
3281         return -ENOMEM;
3282 
3283     info->rbufs = (struct slgt_desc*)info->bufs;
3284     info->tbufs = ((struct slgt_desc*)info->bufs) + info->rbuf_count;
3285 
3286     pbufs = (unsigned int)info->bufs_dma_addr;
3287 
3288     /*
3289      * Build circular lists of descriptors
3290      */
3291 
3292     for (i=0; i < info->rbuf_count; i++) {
3293         /* physical address of this descriptor */
3294         info->rbufs[i].pdesc = pbufs + (i * sizeof(struct slgt_desc));
3295 
3296         /* physical address of next descriptor */
3297         if (i == info->rbuf_count - 1)
3298             info->rbufs[i].next = cpu_to_le32(pbufs);
3299         else
3300             info->rbufs[i].next = cpu_to_le32(pbufs + ((i+1) * sizeof(struct slgt_desc)));
3301         set_desc_count(info->rbufs[i], DMABUFSIZE);
3302     }
3303 
3304     for (i=0; i < info->tbuf_count; i++) {
3305         /* physical address of this descriptor */
3306         info->tbufs[i].pdesc = pbufs + ((info->rbuf_count + i) * sizeof(struct slgt_desc));
3307 
3308         /* physical address of next descriptor */
3309         if (i == info->tbuf_count - 1)
3310             info->tbufs[i].next = cpu_to_le32(pbufs + info->rbuf_count * sizeof(struct slgt_desc));
3311         else
3312             info->tbufs[i].next = cpu_to_le32(pbufs + ((info->rbuf_count + i + 1) * sizeof(struct slgt_desc)));
3313     }
3314 
3315     return 0;
3316 }
3317 
3318 static void free_desc(struct slgt_info *info)
3319 {
3320     if (info->bufs != NULL) {
3321         dma_free_coherent(&info->pdev->dev, DESC_LIST_SIZE,
3322                   info->bufs, info->bufs_dma_addr);
3323         info->bufs  = NULL;
3324         info->rbufs = NULL;
3325         info->tbufs = NULL;
3326     }
3327 }
3328 
3329 static int alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
3330 {
3331     int i;
3332     for (i=0; i < count; i++) {
3333         bufs[i].buf = dma_alloc_coherent(&info->pdev->dev, DMABUFSIZE,
3334                          &bufs[i].buf_dma_addr, GFP_KERNEL);
3335         if (!bufs[i].buf)
3336             return -ENOMEM;
3337         bufs[i].pbuf  = cpu_to_le32((unsigned int)bufs[i].buf_dma_addr);
3338     }
3339     return 0;
3340 }
3341 
3342 static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
3343 {
3344     int i;
3345     for (i=0; i < count; i++) {
3346         if (bufs[i].buf == NULL)
3347             continue;
3348         dma_free_coherent(&info->pdev->dev, DMABUFSIZE, bufs[i].buf,
3349                   bufs[i].buf_dma_addr);
3350         bufs[i].buf = NULL;
3351     }
3352 }
3353 
3354 static int alloc_dma_bufs(struct slgt_info *info)
3355 {
3356     info->rbuf_count = 32;
3357     info->tbuf_count = 32;
3358 
3359     if (alloc_desc(info) < 0 ||
3360         alloc_bufs(info, info->rbufs, info->rbuf_count) < 0 ||
3361         alloc_bufs(info, info->tbufs, info->tbuf_count) < 0 ||
3362         alloc_tmp_rbuf(info) < 0) {
3363         DBGERR(("%s DMA buffer alloc fail\n", info->device_name));
3364         return -ENOMEM;
3365     }
3366     reset_rbufs(info);
3367     return 0;
3368 }
3369 
3370 static void free_dma_bufs(struct slgt_info *info)
3371 {
3372     if (info->bufs) {
3373         free_bufs(info, info->rbufs, info->rbuf_count);
3374         free_bufs(info, info->tbufs, info->tbuf_count);
3375         free_desc(info);
3376     }
3377     free_tmp_rbuf(info);
3378 }
3379 
3380 static int claim_resources(struct slgt_info *info)
3381 {
3382     if (request_mem_region(info->phys_reg_addr, SLGT_REG_SIZE, "synclink_gt") == NULL) {
3383         DBGERR(("%s reg addr conflict, addr=%08X\n",
3384             info->device_name, info->phys_reg_addr));
3385         info->init_error = DiagStatus_AddressConflict;
3386         goto errout;
3387     }
3388     else
3389         info->reg_addr_requested = true;
3390 
3391     info->reg_addr = ioremap(info->phys_reg_addr, SLGT_REG_SIZE);
3392     if (!info->reg_addr) {
3393         DBGERR(("%s can't map device registers, addr=%08X\n",
3394             info->device_name, info->phys_reg_addr));
3395         info->init_error = DiagStatus_CantAssignPciResources;
3396         goto errout;
3397     }
3398     return 0;
3399 
3400 errout:
3401     release_resources(info);
3402     return -ENODEV;
3403 }
3404 
3405 static void release_resources(struct slgt_info *info)
3406 {
3407     if (info->irq_requested) {
3408         free_irq(info->irq_level, info);
3409         info->irq_requested = false;
3410     }
3411 
3412     if (info->reg_addr_requested) {
3413         release_mem_region(info->phys_reg_addr, SLGT_REG_SIZE);
3414         info->reg_addr_requested = false;
3415     }
3416 
3417     if (info->reg_addr) {
3418         iounmap(info->reg_addr);
3419         info->reg_addr = NULL;
3420     }
3421 }
3422 
3423 /* Add the specified device instance data structure to the
3424  * global linked list of devices and increment the device count.
3425  */
3426 static void add_device(struct slgt_info *info)
3427 {
3428     char *devstr;
3429 
3430     info->next_device = NULL;
3431     info->line = slgt_device_count;
3432     sprintf(info->device_name, "%s%d", tty_dev_prefix, info->line);
3433 
3434     if (info->line < MAX_DEVICES) {
3435         if (maxframe[info->line])
3436             info->max_frame_size = maxframe[info->line];
3437     }
3438 
3439     slgt_device_count++;
3440 
3441     if (!slgt_device_list)
3442         slgt_device_list = info;
3443     else {
3444         struct slgt_info *current_dev = slgt_device_list;
3445         while(current_dev->next_device)
3446             current_dev = current_dev->next_device;
3447         current_dev->next_device = info;
3448     }
3449 
3450     if (info->max_frame_size < 4096)
3451         info->max_frame_size = 4096;
3452     else if (info->max_frame_size > 65535)
3453         info->max_frame_size = 65535;
3454 
3455     switch(info->pdev->device) {
3456     case SYNCLINK_GT_DEVICE_ID:
3457         devstr = "GT";
3458         break;
3459     case SYNCLINK_GT2_DEVICE_ID:
3460         devstr = "GT2";
3461         break;
3462     case SYNCLINK_GT4_DEVICE_ID:
3463         devstr = "GT4";
3464         break;
3465     case SYNCLINK_AC_DEVICE_ID:
3466         devstr = "AC";
3467         info->params.mode = MGSL_MODE_ASYNC;
3468         break;
3469     default:
3470         devstr = "(unknown model)";
3471     }
3472     printk("SyncLink %s %s IO=%08x IRQ=%d MaxFrameSize=%u\n",
3473         devstr, info->device_name, info->phys_reg_addr,
3474         info->irq_level, info->max_frame_size);
3475 
3476 #if SYNCLINK_GENERIC_HDLC
3477     hdlcdev_init(info);
3478 #endif
3479 }
3480 
3481 static const struct tty_port_operations slgt_port_ops = {
3482     .carrier_raised = carrier_raised,
3483     .dtr_rts = dtr_rts,
3484 };
3485 
3486 /*
3487  *  allocate device instance structure, return NULL on failure
3488  */
3489 static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
3490 {
3491     struct slgt_info *info;
3492 
3493     info = kzalloc(sizeof(struct slgt_info), GFP_KERNEL);
3494 
3495     if (!info) {
3496         DBGERR(("%s device alloc failed adapter=%d port=%d\n",
3497             driver_name, adapter_num, port_num));
3498     } else {
3499         tty_port_init(&info->port);
3500         info->port.ops = &slgt_port_ops;
3501         info->magic = MGSL_MAGIC;
3502         INIT_WORK(&info->task, bh_handler);
3503         info->max_frame_size = 4096;
3504         info->base_clock = 14745600;
3505         info->rbuf_fill_level = DMABUFSIZE;
3506         init_waitqueue_head(&info->status_event_wait_q);
3507         init_waitqueue_head(&info->event_wait_q);
3508         spin_lock_init(&info->netlock);
3509         memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
3510         info->idle_mode = HDLC_TXIDLE_FLAGS;
3511         info->adapter_num = adapter_num;
3512         info->port_num = port_num;
3513 
3514         timer_setup(&info->tx_timer, tx_timeout, 0);
3515         timer_setup(&info->rx_timer, rx_timeout, 0);
3516 
3517         /* Copy configuration info to device instance data */
3518         info->pdev = pdev;
3519         info->irq_level = pdev->irq;
3520         info->phys_reg_addr = pci_resource_start(pdev,0);
3521 
3522         info->bus_type = MGSL_BUS_TYPE_PCI;
3523         info->irq_flags = IRQF_SHARED;
3524 
3525         info->init_error = -1; /* assume error, set to 0 on successful init */
3526     }
3527 
3528     return info;
3529 }
3530 
3531 static void device_init(int adapter_num, struct pci_dev *pdev)
3532 {
3533     struct slgt_info *port_array[SLGT_MAX_PORTS];
3534     int i;
3535     int port_count = 1;
3536 
3537     if (pdev->device == SYNCLINK_GT2_DEVICE_ID)
3538         port_count = 2;
3539     else if (pdev->device == SYNCLINK_GT4_DEVICE_ID)
3540         port_count = 4;
3541 
3542     /* allocate device instances for all ports */
3543     for (i=0; i < port_count; ++i) {
3544         port_array[i] = alloc_dev(adapter_num, i, pdev);
3545         if (port_array[i] == NULL) {
3546             for (--i; i >= 0; --i) {
3547                 tty_port_destroy(&port_array[i]->port);
3548                 kfree(port_array[i]);
3549             }
3550             return;
3551         }
3552     }
3553 
3554     /* give copy of port_array to all ports and add to device list  */
3555     for (i=0; i < port_count; ++i) {
3556         memcpy(port_array[i]->port_array, port_array, sizeof(port_array));
3557         add_device(port_array[i]);
3558         port_array[i]->port_count = port_count;
3559         spin_lock_init(&port_array[i]->lock);
3560     }
3561 
3562     /* Allocate and claim adapter resources */
3563     if (!claim_resources(port_array[0])) {
3564 
3565         alloc_dma_bufs(port_array[0]);
3566 
3567         /* copy resource information from first port to others */
3568         for (i = 1; i < port_count; ++i) {
3569             port_array[i]->irq_level = port_array[0]->irq_level;
3570             port_array[i]->reg_addr  = port_array[0]->reg_addr;
3571             alloc_dma_bufs(port_array[i]);
3572         }
3573 
3574         if (request_irq(port_array[0]->irq_level,
3575                     slgt_interrupt,
3576                     port_array[0]->irq_flags,
3577                     port_array[0]->device_name,
3578                     port_array[0]) < 0) {
3579             DBGERR(("%s request_irq failed IRQ=%d\n",
3580                 port_array[0]->device_name,
3581                 port_array[0]->irq_level));
3582         } else {
3583             port_array[0]->irq_requested = true;
3584             adapter_test(port_array[0]);
3585             for (i=1 ; i < port_count ; i++) {
3586                 port_array[i]->init_error = port_array[0]->init_error;
3587                 port_array[i]->gpio_present = port_array[0]->gpio_present;
3588             }
3589         }
3590     }
3591 
3592     for (i = 0; i < port_count; ++i) {
3593         struct slgt_info *info = port_array[i];
3594         tty_port_register_device(&info->port, serial_driver, info->line,
3595                 &info->pdev->dev);
3596     }
3597 }
3598 
3599 static int init_one(struct pci_dev *dev,
3600                   const struct pci_device_id *ent)
3601 {
3602     if (pci_enable_device(dev)) {
3603         printk("error enabling pci device %p\n", dev);
3604         return -EIO;
3605     }
3606     pci_set_master(dev);
3607     device_init(slgt_device_count, dev);
3608     return 0;
3609 }
3610 
3611 static void remove_one(struct pci_dev *dev)
3612 {
3613 }
3614 
3615 static const struct tty_operations ops = {
3616     .open = open,
3617     .close = close,
3618     .write = write,
3619     .put_char = put_char,
3620     .flush_chars = flush_chars,
3621     .write_room = write_room,
3622     .chars_in_buffer = chars_in_buffer,
3623     .flush_buffer = flush_buffer,
3624     .ioctl = ioctl,
3625     .compat_ioctl = slgt_compat_ioctl,
3626     .throttle = throttle,
3627     .unthrottle = unthrottle,
3628     .send_xchar = send_xchar,
3629     .break_ctl = set_break,
3630     .wait_until_sent = wait_until_sent,
3631     .set_termios = set_termios,
3632     .stop = tx_hold,
3633     .start = tx_release,
3634     .hangup = hangup,
3635     .tiocmget = tiocmget,
3636     .tiocmset = tiocmset,
3637     .get_icount = get_icount,
3638     .proc_show = synclink_gt_proc_show,
3639 };
3640 
3641 static void slgt_cleanup(void)
3642 {
3643     struct slgt_info *info;
3644     struct slgt_info *tmp;
3645 
3646     printk(KERN_INFO "unload %s\n", driver_name);
3647 
3648     if (serial_driver) {
3649         for (info=slgt_device_list ; info != NULL ; info=info->next_device)
3650             tty_unregister_device(serial_driver, info->line);
3651         tty_unregister_driver(serial_driver);
3652         tty_driver_kref_put(serial_driver);
3653     }
3654 
3655     /* reset devices */
3656     info = slgt_device_list;
3657     while(info) {
3658         reset_port(info);
3659         info = info->next_device;
3660     }
3661 
3662     /* release devices */
3663     info = slgt_device_list;
3664     while(info) {
3665 #if SYNCLINK_GENERIC_HDLC
3666         hdlcdev_exit(info);
3667 #endif
3668         free_dma_bufs(info);
3669         free_tmp_rbuf(info);
3670         if (info->port_num == 0)
3671             release_resources(info);
3672         tmp = info;
3673         info = info->next_device;
3674         tty_port_destroy(&tmp->port);
3675         kfree(tmp);
3676     }
3677 
3678     if (pci_registered)
3679         pci_unregister_driver(&pci_driver);
3680 }
3681 
3682 /*
3683  *  Driver initialization entry point.
3684  */
3685 static int __init slgt_init(void)
3686 {
3687     int rc;
3688 
3689     printk(KERN_INFO "%s\n", driver_name);
3690 
3691     serial_driver = tty_alloc_driver(MAX_DEVICES, TTY_DRIVER_REAL_RAW |
3692             TTY_DRIVER_DYNAMIC_DEV);
3693     if (IS_ERR(serial_driver)) {
3694         printk("%s can't allocate tty driver\n", driver_name);
3695         return PTR_ERR(serial_driver);
3696     }
3697 
3698     /* Initialize the tty_driver structure */
3699 
3700     serial_driver->driver_name = slgt_driver_name;
3701     serial_driver->name = tty_dev_prefix;
3702     serial_driver->major = ttymajor;
3703     serial_driver->minor_start = 64;
3704     serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
3705     serial_driver->subtype = SERIAL_TYPE_NORMAL;
3706     serial_driver->init_termios = tty_std_termios;
3707     serial_driver->init_termios.c_cflag =
3708         B9600 | CS8 | CREAD | HUPCL | CLOCAL;
3709     serial_driver->init_termios.c_ispeed = 9600;
3710     serial_driver->init_termios.c_ospeed = 9600;
3711     tty_set_operations(serial_driver, &ops);
3712     if ((rc = tty_register_driver(serial_driver)) < 0) {
3713         DBGERR(("%s can't register serial driver\n", driver_name));
3714         tty_driver_kref_put(serial_driver);
3715         serial_driver = NULL;
3716         goto error;
3717     }
3718 
3719     printk(KERN_INFO "%s, tty major#%d\n",
3720            driver_name, serial_driver->major);
3721 
3722     slgt_device_count = 0;
3723     if ((rc = pci_register_driver(&pci_driver)) < 0) {
3724         printk("%s pci_register_driver error=%d\n", driver_name, rc);
3725         goto error;
3726     }
3727     pci_registered = true;
3728 
3729     if (!slgt_device_list)
3730         printk("%s no devices found\n",driver_name);
3731 
3732     return 0;
3733 
3734 error:
3735     slgt_cleanup();
3736     return rc;
3737 }
3738 
3739 static void __exit slgt_exit(void)
3740 {
3741     slgt_cleanup();
3742 }
3743 
3744 module_init(slgt_init);
3745 module_exit(slgt_exit);
3746 
3747 /*
3748  * register access routines
3749  */
3750 
3751 #define CALC_REGADDR() \
3752     unsigned long reg_addr = ((unsigned long)info->reg_addr) + addr; \
3753     if (addr >= 0x80) \
3754         reg_addr += (info->port_num) * 32; \
3755     else if (addr >= 0x40)  \
3756         reg_addr += (info->port_num) * 16;
3757 
3758 static __u8 rd_reg8(struct slgt_info *info, unsigned int addr)
3759 {
3760     CALC_REGADDR();
3761     return readb((void __iomem *)reg_addr);
3762 }
3763 
3764 static void wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value)
3765 {
3766     CALC_REGADDR();
3767     writeb(value, (void __iomem *)reg_addr);
3768 }
3769 
3770 static __u16 rd_reg16(struct slgt_info *info, unsigned int addr)
3771 {
3772     CALC_REGADDR();
3773     return readw((void __iomem *)reg_addr);
3774 }
3775 
3776 static void wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value)
3777 {
3778     CALC_REGADDR();
3779     writew(value, (void __iomem *)reg_addr);
3780 }
3781 
3782 static __u32 rd_reg32(struct slgt_info *info, unsigned int addr)
3783 {
3784     CALC_REGADDR();
3785     return readl((void __iomem *)reg_addr);
3786 }
3787 
3788 static void wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value)
3789 {
3790     CALC_REGADDR();
3791     writel(value, (void __iomem *)reg_addr);
3792 }
3793 
3794 static void rdma_reset(struct slgt_info *info)
3795 {
3796     unsigned int i;
3797 
3798     /* set reset bit */
3799     wr_reg32(info, RDCSR, BIT1);
3800 
3801     /* wait for enable bit cleared */
3802     for(i=0 ; i < 1000 ; i++)
3803         if (!(rd_reg32(info, RDCSR) & BIT0))
3804             break;
3805 }
3806 
3807 static void tdma_reset(struct slgt_info *info)
3808 {
3809     unsigned int i;
3810 
3811     /* set reset bit */
3812     wr_reg32(info, TDCSR, BIT1);
3813 
3814     /* wait for enable bit cleared */
3815     for(i=0 ; i < 1000 ; i++)
3816         if (!(rd_reg32(info, TDCSR) & BIT0))
3817             break;
3818 }
3819 
3820 /*
3821  * enable internal loopback
3822  * TxCLK and RxCLK are generated from BRG
3823  * and TxD is looped back to RxD internally.
3824  */
3825 static void enable_loopback(struct slgt_info *info)
3826 {
3827     /* SCR (serial control) BIT2=loopback enable */
3828     wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT2));
3829 
3830     if (info->params.mode != MGSL_MODE_ASYNC) {
3831         /* CCR (clock control)
3832          * 07..05  tx clock source (010 = BRG)
3833          * 04..02  rx clock source (010 = BRG)
3834          * 01      auxclk enable   (0 = disable)
3835          * 00      BRG enable      (1 = enable)
3836          *
3837          * 0100 1001
3838          */
3839         wr_reg8(info, CCR, 0x49);
3840 
3841         /* set speed if available, otherwise use default */
3842         if (info->params.clock_speed)
3843             set_rate(info, info->params.clock_speed);
3844         else
3845             set_rate(info, 3686400);
3846     }
3847 }
3848 
3849 /*
3850  *  set baud rate generator to specified rate
3851  */
3852 static void set_rate(struct slgt_info *info, u32 rate)
3853 {
3854     unsigned int div;
3855     unsigned int osc = info->base_clock;
3856 
3857     /* div = osc/rate - 1
3858      *
3859      * Round div up if osc/rate is not integer to
3860      * force to next slowest rate.
3861      */
3862 
3863     if (rate) {
3864         div = osc/rate;
3865         if (!(osc % rate) && div)
3866             div--;
3867         wr_reg16(info, BDR, (unsigned short)div);
3868     }
3869 }
3870 
3871 static void rx_stop(struct slgt_info *info)
3872 {
3873     unsigned short val;
3874 
3875     /* disable and reset receiver */
3876     val = rd_reg16(info, RCR) & ~BIT1;          /* clear enable bit */
3877     wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
3878     wr_reg16(info, RCR, val);                  /* clear reset bit */
3879 
3880     slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA + IRQ_RXIDLE);
3881 
3882     /* clear pending rx interrupts */
3883     wr_reg16(info, SSR, IRQ_RXIDLE + IRQ_RXOVER);
3884 
3885     rdma_reset(info);
3886 
3887     info->rx_enabled = false;
3888     info->rx_restart = false;
3889 }
3890 
3891 static void rx_start(struct slgt_info *info)
3892 {
3893     unsigned short val;
3894 
3895     slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA);
3896 
3897     /* clear pending rx overrun IRQ */
3898     wr_reg16(info, SSR, IRQ_RXOVER);
3899 
3900     /* reset and disable receiver */
3901     val = rd_reg16(info, RCR) & ~BIT1; /* clear enable bit */
3902     wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
3903     wr_reg16(info, RCR, val);                  /* clear reset bit */
3904 
3905     rdma_reset(info);
3906     reset_rbufs(info);
3907 
3908     if (info->rx_pio) {
3909         /* rx request when rx FIFO not empty */
3910         wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) & ~BIT14));
3911         slgt_irq_on(info, IRQ_RXDATA);
3912         if (info->params.mode == MGSL_MODE_ASYNC) {
3913             /* enable saving of rx status */
3914             wr_reg32(info, RDCSR, BIT6);
3915         }
3916     } else {
3917         /* rx request when rx FIFO half full */
3918         wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT14));
3919         /* set 1st descriptor address */
3920         wr_reg32(info, RDDAR, info->rbufs[0].pdesc);
3921 
3922         if (info->params.mode != MGSL_MODE_ASYNC) {
3923             /* enable rx DMA and DMA interrupt */
3924             wr_reg32(info, RDCSR, (BIT2 + BIT0));
3925         } else {
3926             /* enable saving of rx status, rx DMA and DMA interrupt */
3927             wr_reg32(info, RDCSR, (BIT6 + BIT2 + BIT0));
3928         }
3929     }
3930 
3931     slgt_irq_on(info, IRQ_RXOVER);
3932 
3933     /* enable receiver */
3934     wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | BIT1));
3935 
3936     info->rx_restart = false;
3937     info->rx_enabled = true;
3938 }
3939 
3940 static void tx_start(struct slgt_info *info)
3941 {
3942     if (!info->tx_enabled) {
3943         wr_reg16(info, TCR,
3944              (unsigned short)((rd_reg16(info, TCR) | BIT1) & ~BIT2));
3945         info->tx_enabled = true;
3946     }
3947 
3948     if (desc_count(info->tbufs[info->tbuf_start])) {
3949         info->drop_rts_on_tx_done = false;
3950 
3951         if (info->params.mode != MGSL_MODE_ASYNC) {
3952             if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
3953                 get_gtsignals(info);
3954                 if (!(info->signals & SerialSignal_RTS)) {
3955                     info->signals |= SerialSignal_RTS;
3956                     set_gtsignals(info);
3957                     info->drop_rts_on_tx_done = true;
3958                 }
3959             }
3960 
3961             slgt_irq_off(info, IRQ_TXDATA);
3962             slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE);
3963             /* clear tx idle and underrun status bits */
3964             wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
3965         } else {
3966             slgt_irq_off(info, IRQ_TXDATA);
3967             slgt_irq_on(info, IRQ_TXIDLE);
3968             /* clear tx idle status bit */
3969             wr_reg16(info, SSR, IRQ_TXIDLE);
3970         }
3971         /* set 1st descriptor address and start DMA */
3972         wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
3973         wr_reg32(info, TDCSR, BIT2 + BIT0);
3974         info->tx_active = true;
3975     }
3976 }
3977 
3978 static void tx_stop(struct slgt_info *info)
3979 {
3980     unsigned short val;
3981 
3982     del_timer(&info->tx_timer);
3983 
3984     tdma_reset(info);
3985 
3986     /* reset and disable transmitter */
3987     val = rd_reg16(info, TCR) & ~BIT1;          /* clear enable bit */
3988     wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
3989 
3990     slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
3991 
3992     /* clear tx idle and underrun status bit */
3993     wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
3994 
3995     reset_tbufs(info);
3996 
3997     info->tx_enabled = false;
3998     info->tx_active = false;
3999 }
4000 
4001 static void reset_port(struct slgt_info *info)
4002 {
4003     if (!info->reg_addr)
4004         return;
4005 
4006     tx_stop(info);
4007     rx_stop(info);
4008 
4009     info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
4010     set_gtsignals(info);
4011 
4012     slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
4013 }
4014 
4015 static void reset_adapter(struct slgt_info *info)
4016 {
4017     int i;
4018     for (i=0; i < info->port_count; ++i) {
4019         if (info->port_array[i])
4020             reset_port(info->port_array[i]);
4021     }
4022 }
4023 
4024 static void async_mode(struct slgt_info *info)
4025 {
4026     unsigned short val;
4027 
4028     slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
4029     tx_stop(info);
4030     rx_stop(info);
4031 
4032     /* TCR (tx control)
4033      *
4034      * 15..13  mode, 010=async
4035      * 12..10  encoding, 000=NRZ
4036      * 09      parity enable
4037      * 08      1=odd parity, 0=even parity
4038      * 07      1=RTS driver control
4039      * 06      1=break enable
4040      * 05..04  character length
4041      *         00=5 bits
4042      *         01=6 bits
4043      *         10=7 bits
4044      *         11=8 bits
4045      * 03      0=1 stop bit, 1=2 stop bits
4046      * 02      reset
4047      * 01      enable
4048      * 00      auto-CTS enable
4049      */
4050     val = 0x4000;
4051 
4052     if (info->if_mode & MGSL_INTERFACE_RTS_EN)
4053         val |= BIT7;
4054 
4055     if (info->params.parity != ASYNC_PARITY_NONE) {
4056         val |= BIT9;
4057         if (info->params.parity == ASYNC_PARITY_ODD)
4058             val |= BIT8;
4059     }
4060 
4061     switch (info->params.data_bits)
4062     {
4063     case 6: val |= BIT4; break;
4064     case 7: val |= BIT5; break;
4065     case 8: val |= BIT5 + BIT4; break;
4066     }
4067 
4068     if (info->params.stop_bits != 1)
4069         val |= BIT3;
4070 
4071     if (info->params.flags & HDLC_FLAG_AUTO_CTS)
4072         val |= BIT0;
4073 
4074     wr_reg16(info, TCR, val);
4075 
4076     /* RCR (rx control)
4077      *
4078      * 15..13  mode, 010=async
4079      * 12..10  encoding, 000=NRZ
4080      * 09      parity enable
4081      * 08      1=odd parity, 0=even parity
4082      * 07..06  reserved, must be 0
4083      * 05..04  character length
4084      *         00=5 bits
4085      *         01=6 bits
4086      *         10=7 bits
4087      *         11=8 bits
4088      * 03      reserved, must be zero
4089      * 02      reset
4090      * 01      enable
4091      * 00      auto-DCD enable
4092      */
4093     val = 0x4000;
4094 
4095     if (info->params.parity != ASYNC_PARITY_NONE) {
4096         val |= BIT9;
4097         if (info->params.parity == ASYNC_PARITY_ODD)
4098             val |= BIT8;
4099     }
4100 
4101     switch (info->params.data_bits)
4102     {
4103     case 6: val |= BIT4; break;
4104     case 7: val |= BIT5; break;
4105     case 8: val |= BIT5 + BIT4; break;
4106     }
4107 
4108     if (info->params.flags & HDLC_FLAG_AUTO_DCD)
4109         val |= BIT0;
4110 
4111     wr_reg16(info, RCR, val);
4112 
4113     /* CCR (clock control)
4114      *
4115      * 07..05  011 = tx clock source is BRG/16
4116      * 04..02  010 = rx clock source is BRG
4117      * 01      0 = auxclk disabled
4118      * 00      1 = BRG enabled
4119      *
4120      * 0110 1001
4121      */
4122     wr_reg8(info, CCR, 0x69);
4123 
4124     msc_set_vcr(info);
4125 
4126     /* SCR (serial control)
4127      *
4128      * 15  1=tx req on FIFO half empty
4129      * 14  1=rx req on FIFO half full
4130      * 13  tx data  IRQ enable
4131      * 12  tx idle  IRQ enable
4132      * 11  rx break on IRQ enable
4133      * 10  rx data  IRQ enable
4134      * 09  rx break off IRQ enable
4135      * 08  overrun  IRQ enable
4136      * 07  DSR      IRQ enable
4137      * 06  CTS      IRQ enable
4138      * 05  DCD      IRQ enable
4139      * 04  RI       IRQ enable
4140      * 03  0=16x sampling, 1=8x sampling
4141      * 02  1=txd->rxd internal loopback enable
4142      * 01  reserved, must be zero
4143      * 00  1=master IRQ enable
4144      */
4145     val = BIT15 + BIT14 + BIT0;
4146     /* JCR[8] : 1 = x8 async mode feature available */
4147     if ((rd_reg32(info, JCR) & BIT8) && info->params.data_rate &&
4148         ((info->base_clock < (info->params.data_rate * 16)) ||
4149          (info->base_clock % (info->params.data_rate * 16)))) {
4150         /* use 8x sampling */
4151         val |= BIT3;
4152         set_rate(info, info->params.data_rate * 8);
4153     } else {
4154         /* use 16x sampling */
4155         set_rate(info, info->params.data_rate * 16);
4156     }
4157     wr_reg16(info, SCR, val);
4158 
4159     slgt_irq_on(info, IRQ_RXBREAK | IRQ_RXOVER);
4160 
4161     if (info->params.loopback)
4162         enable_loopback(info);
4163 }
4164 
4165 static void sync_mode(struct slgt_info *info)
4166 {
4167     unsigned short val;
4168 
4169     slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
4170     tx_stop(info);
4171     rx_stop(info);
4172 
4173     /* TCR (tx control)
4174      *
4175      * 15..13  mode
4176      *         000=HDLC/SDLC
4177      *         001=raw bit synchronous
4178      *         010=asynchronous/isochronous
4179      *         011=monosync byte synchronous
4180      *         100=bisync byte synchronous
4181      *         101=xsync byte synchronous
4182      * 12..10  encoding
4183      * 09      CRC enable
4184      * 08      CRC32
4185      * 07      1=RTS driver control
4186      * 06      preamble enable
4187      * 05..04  preamble length
4188      * 03      share open/close flag
4189      * 02      reset
4190      * 01      enable
4191      * 00      auto-CTS enable
4192      */
4193     val = BIT2;
4194 
4195     switch(info->params.mode) {
4196     case MGSL_MODE_XSYNC:
4197         val |= BIT15 + BIT13;
4198         break;
4199     case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
4200     case MGSL_MODE_BISYNC:   val |= BIT15; break;
4201     case MGSL_MODE_RAW:      val |= BIT13; break;
4202     }
4203     if (info->if_mode & MGSL_INTERFACE_RTS_EN)
4204         val |= BIT7;
4205 
4206     switch(info->params.encoding)
4207     {
4208     case HDLC_ENCODING_NRZB:          val |= BIT10; break;
4209     case HDLC_ENCODING_NRZI_MARK:     val |= BIT11; break;
4210     case HDLC_ENCODING_NRZI:          val |= BIT11 + BIT10; break;
4211     case HDLC_ENCODING_BIPHASE_MARK:  val |= BIT12; break;
4212     case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break;
4213     case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break;
4214     case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
4215     }
4216 
4217     switch (info->params.crc_type & HDLC_CRC_MASK)
4218     {
4219     case HDLC_CRC_16_CCITT: val |= BIT9; break;
4220     case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
4221     }
4222 
4223     if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE)
4224         val |= BIT6;
4225 
4226     switch (info->params.preamble_length)
4227     {
4228     case HDLC_PREAMBLE_LENGTH_16BITS: val |= BIT5; break;
4229     case HDLC_PREAMBLE_LENGTH_32BITS: val |= BIT4; break;
4230     case HDLC_PREAMBLE_LENGTH_64BITS: val |= BIT5 + BIT4; break;
4231     }
4232 
4233     if (info->params.flags & HDLC_FLAG_AUTO_CTS)
4234         val |= BIT0;
4235 
4236     wr_reg16(info, TCR, val);
4237 
4238     /* TPR (transmit preamble) */
4239 
4240     switch (info->params.preamble)
4241     {
4242     case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break;
4243     case HDLC_PREAMBLE_PATTERN_ONES:  val = 0xff; break;
4244     case HDLC_PREAMBLE_PATTERN_ZEROS: val = 0x00; break;
4245     case HDLC_PREAMBLE_PATTERN_10:    val = 0x55; break;
4246     case HDLC_PREAMBLE_PATTERN_01:    val = 0xaa; break;
4247     default:                          val = 0x7e; break;
4248     }
4249     wr_reg8(info, TPR, (unsigned char)val);
4250 
4251     /* RCR (rx control)
4252      *
4253      * 15..13  mode
4254      *         000=HDLC/SDLC
4255      *         001=raw bit synchronous
4256      *         010=asynchronous/isochronous
4257      *         011=monosync byte synchronous
4258      *         100=bisync byte synchronous
4259      *         101=xsync byte synchronous
4260      * 12..10  encoding
4261      * 09      CRC enable
4262      * 08      CRC32
4263      * 07..03  reserved, must be 0
4264      * 02      reset
4265      * 01      enable
4266      * 00      auto-DCD enable
4267      */
4268     val = 0;
4269 
4270     switch(info->params.mode) {
4271     case MGSL_MODE_XSYNC:
4272         val |= BIT15 + BIT13;
4273         break;
4274     case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
4275     case MGSL_MODE_BISYNC:   val |= BIT15; break;
4276     case MGSL_MODE_RAW:      val |= BIT13; break;
4277     }
4278 
4279     switch(info->params.encoding)
4280     {
4281     case HDLC_ENCODING_NRZB:          val |= BIT10; break;
4282     case HDLC_ENCODING_NRZI_MARK:     val |= BIT11; break;
4283     case HDLC_ENCODING_NRZI:          val |= BIT11 + BIT10; break;
4284     case HDLC_ENCODING_BIPHASE_MARK:  val |= BIT12; break;
4285     case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break;
4286     case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break;
4287     case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
4288     }
4289 
4290     switch (info->params.crc_type & HDLC_CRC_MASK)
4291     {
4292     case HDLC_CRC_16_CCITT: val |= BIT9; break;
4293     case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
4294     }
4295 
4296     if (info->params.flags & HDLC_FLAG_AUTO_DCD)
4297         val |= BIT0;
4298 
4299     wr_reg16(info, RCR, val);
4300 
4301     /* CCR (clock control)
4302      *
4303      * 07..05  tx clock source
4304      * 04..02  rx clock source
4305      * 01      auxclk enable
4306      * 00      BRG enable
4307      */
4308     val = 0;
4309 
4310     if (info->params.flags & HDLC_FLAG_TXC_BRG)
4311     {
4312         // when RxC source is DPLL, BRG generates 16X DPLL
4313         // reference clock, so take TxC from BRG/16 to get
4314         // transmit clock at actual data rate
4315         if (info->params.flags & HDLC_FLAG_RXC_DPLL)
4316             val |= BIT6 + BIT5; /* 011, txclk = BRG/16 */
4317         else
4318             val |= BIT6;    /* 010, txclk = BRG */
4319     }
4320     else if (info->params.flags & HDLC_FLAG_TXC_DPLL)
4321         val |= BIT7;    /* 100, txclk = DPLL Input */
4322     else if (info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4323         val |= BIT5;    /* 001, txclk = RXC Input */
4324 
4325     if (info->params.flags & HDLC_FLAG_RXC_BRG)
4326         val |= BIT3;    /* 010, rxclk = BRG */
4327     else if (info->params.flags & HDLC_FLAG_RXC_DPLL)
4328         val |= BIT4;    /* 100, rxclk = DPLL */
4329     else if (info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4330         val |= BIT2;    /* 001, rxclk = TXC Input */
4331 
4332     if (info->params.clock_speed)
4333         val |= BIT1 + BIT0;
4334 
4335     wr_reg8(info, CCR, (unsigned char)val);
4336 
4337     if (info->params.flags & (HDLC_FLAG_TXC_DPLL + HDLC_FLAG_RXC_DPLL))
4338     {
4339         // program DPLL mode
4340         switch(info->params.encoding)
4341         {
4342         case HDLC_ENCODING_BIPHASE_MARK:
4343         case HDLC_ENCODING_BIPHASE_SPACE:
4344             val = BIT7; break;
4345         case HDLC_ENCODING_BIPHASE_LEVEL:
4346         case HDLC_ENCODING_DIFF_BIPHASE_LEVEL:
4347             val = BIT7 + BIT6; break;
4348         default: val = BIT6;    // NRZ encodings
4349         }
4350         wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | val));
4351 
4352         // DPLL requires a 16X reference clock from BRG
4353         set_rate(info, info->params.clock_speed * 16);
4354     }
4355     else
4356         set_rate(info, info->params.clock_speed);
4357 
4358     tx_set_idle(info);
4359 
4360     msc_set_vcr(info);
4361 
4362     /* SCR (serial control)
4363      *
4364      * 15  1=tx req on FIFO half empty
4365      * 14  1=rx req on FIFO half full
4366      * 13  tx data  IRQ enable
4367      * 12  tx idle  IRQ enable
4368      * 11  underrun IRQ enable
4369      * 10  rx data  IRQ enable
4370      * 09  rx idle  IRQ enable
4371      * 08  overrun  IRQ enable
4372      * 07  DSR      IRQ enable
4373      * 06  CTS      IRQ enable
4374      * 05  DCD      IRQ enable
4375      * 04  RI       IRQ enable
4376      * 03  reserved, must be zero
4377      * 02  1=txd->rxd internal loopback enable
4378      * 01  reserved, must be zero
4379      * 00  1=master IRQ enable
4380      */
4381     wr_reg16(info, SCR, BIT15 + BIT14 + BIT0);
4382 
4383     if (info->params.loopback)
4384         enable_loopback(info);
4385 }
4386 
4387 /*
4388  *  set transmit idle mode
4389  */
4390 static void tx_set_idle(struct slgt_info *info)
4391 {
4392     unsigned char val;
4393     unsigned short tcr;
4394 
4395     /* if preamble enabled (tcr[6] == 1) then tx idle size = 8 bits
4396      * else tcr[5:4] = tx idle size: 00 = 8 bits, 01 = 16 bits
4397      */
4398     tcr = rd_reg16(info, TCR);
4399     if (info->idle_mode & HDLC_TXIDLE_CUSTOM_16) {
4400         /* disable preamble, set idle size to 16 bits */
4401         tcr = (tcr & ~(BIT6 + BIT5)) | BIT4;
4402         /* MSB of 16 bit idle specified in tx preamble register (TPR) */
4403         wr_reg8(info, TPR, (unsigned char)((info->idle_mode >> 8) & 0xff));
4404     } else if (!(tcr & BIT6)) {
4405         /* preamble is disabled, set idle size to 8 bits */
4406         tcr &= ~(BIT5 + BIT4);
4407     }
4408     wr_reg16(info, TCR, tcr);
4409 
4410     if (info->idle_mode & (HDLC_TXIDLE_CUSTOM_8 | HDLC_TXIDLE_CUSTOM_16)) {
4411         /* LSB of custom tx idle specified in tx idle register */
4412         val = (unsigned char)(info->idle_mode & 0xff);
4413     } else {
4414         /* standard 8 bit idle patterns */
4415         switch(info->idle_mode)
4416         {
4417         case HDLC_TXIDLE_FLAGS:          val = 0x7e; break;
4418         case HDLC_TXIDLE_ALT_ZEROS_ONES:
4419         case HDLC_TXIDLE_ALT_MARK_SPACE: val = 0xaa; break;
4420         case HDLC_TXIDLE_ZEROS:
4421         case HDLC_TXIDLE_SPACE:          val = 0x00; break;
4422         default:                         val = 0xff;
4423         }
4424     }
4425 
4426     wr_reg8(info, TIR, val);
4427 }
4428 
4429 /*
4430  * get state of V24 status (input) signals
4431  */
4432 static void get_gtsignals(struct slgt_info *info)
4433 {
4434     unsigned short status = rd_reg16(info, SSR);
4435 
4436     /* clear all serial signals except RTS and DTR */
4437     info->signals &= SerialSignal_RTS | SerialSignal_DTR;
4438 
4439     if (status & BIT3)
4440         info->signals |= SerialSignal_DSR;
4441     if (status & BIT2)
4442         info->signals |= SerialSignal_CTS;
4443     if (status & BIT1)
4444         info->signals |= SerialSignal_DCD;
4445     if (status & BIT0)
4446         info->signals |= SerialSignal_RI;
4447 }
4448 
4449 /*
4450  * set V.24 Control Register based on current configuration
4451  */
4452 static void msc_set_vcr(struct slgt_info *info)
4453 {
4454     unsigned char val = 0;
4455 
4456     /* VCR (V.24 control)
4457      *
4458      * 07..04  serial IF select
4459      * 03      DTR
4460      * 02      RTS
4461      * 01      LL
4462      * 00      RL
4463      */
4464 
4465     switch(info->if_mode & MGSL_INTERFACE_MASK)
4466     {
4467     case MGSL_INTERFACE_RS232:
4468         val |= BIT5; /* 0010 */
4469         break;
4470     case MGSL_INTERFACE_V35:
4471         val |= BIT7 + BIT6 + BIT5; /* 1110 */
4472         break;
4473     case MGSL_INTERFACE_RS422:
4474         val |= BIT6; /* 0100 */
4475         break;
4476     }
4477 
4478     if (info->if_mode & MGSL_INTERFACE_MSB_FIRST)
4479         val |= BIT4;
4480     if (info->signals & SerialSignal_DTR)
4481         val |= BIT3;
4482     if (info->signals & SerialSignal_RTS)
4483         val |= BIT2;
4484     if (info->if_mode & MGSL_INTERFACE_LL)
4485         val |= BIT1;
4486     if (info->if_mode & MGSL_INTERFACE_RL)
4487         val |= BIT0;
4488     wr_reg8(info, VCR, val);
4489 }
4490 
4491 /*
4492  * set state of V24 control (output) signals
4493  */
4494 static void set_gtsignals(struct slgt_info *info)
4495 {
4496     unsigned char val = rd_reg8(info, VCR);
4497     if (info->signals & SerialSignal_DTR)
4498         val |= BIT3;
4499     else
4500         val &= ~BIT3;
4501     if (info->signals & SerialSignal_RTS)
4502         val |= BIT2;
4503     else
4504         val &= ~BIT2;
4505     wr_reg8(info, VCR, val);
4506 }
4507 
4508 /*
4509  * free range of receive DMA buffers (i to last)
4510  */
4511 static void free_rbufs(struct slgt_info *info, unsigned int i, unsigned int last)
4512 {
4513     int done = 0;
4514 
4515     while(!done) {
4516         /* reset current buffer for reuse */
4517         info->rbufs[i].status = 0;
4518         set_desc_count(info->rbufs[i], info->rbuf_fill_level);
4519         if (i == last)
4520             done = 1;
4521         if (++i == info->rbuf_count)
4522             i = 0;
4523     }
4524     info->rbuf_current = i;
4525 }
4526 
4527 /*
4528  * mark all receive DMA buffers as free
4529  */
4530 static void reset_rbufs(struct slgt_info *info)
4531 {
4532     free_rbufs(info, 0, info->rbuf_count - 1);
4533     info->rbuf_fill_index = 0;
4534     info->rbuf_fill_count = 0;
4535 }
4536 
4537 /*
4538  * pass receive HDLC frame to upper layer
4539  *
4540  * return true if frame available, otherwise false
4541  */
4542 static bool rx_get_frame(struct slgt_info *info)
4543 {
4544     unsigned int start, end;
4545     unsigned short status;
4546     unsigned int framesize = 0;
4547     unsigned long flags;
4548     struct tty_struct *tty = info->port.tty;
4549     unsigned char addr_field = 0xff;
4550     unsigned int crc_size = 0;
4551 
4552     switch (info->params.crc_type & HDLC_CRC_MASK) {
4553     case HDLC_CRC_16_CCITT: crc_size = 2; break;
4554     case HDLC_CRC_32_CCITT: crc_size = 4; break;
4555     }
4556 
4557 check_again:
4558 
4559     framesize = 0;
4560     addr_field = 0xff;
4561     start = end = info->rbuf_current;
4562 
4563     for (;;) {
4564         if (!desc_complete(info->rbufs[end]))
4565             goto cleanup;
4566 
4567         if (framesize == 0 && info->params.addr_filter != 0xff)
4568             addr_field = info->rbufs[end].buf[0];
4569 
4570         framesize += desc_count(info->rbufs[end]);
4571 
4572         if (desc_eof(info->rbufs[end]))
4573             break;
4574 
4575         if (++end == info->rbuf_count)
4576             end = 0;
4577 
4578         if (end == info->rbuf_current) {
4579             if (info->rx_enabled){
4580                 spin_lock_irqsave(&info->lock,flags);
4581                 rx_start(info);
4582                 spin_unlock_irqrestore(&info->lock,flags);
4583             }
4584             goto cleanup;
4585         }
4586     }
4587 
4588     /* status
4589      *
4590      * 15      buffer complete
4591      * 14..06  reserved
4592      * 05..04  residue
4593      * 02      eof (end of frame)
4594      * 01      CRC error
4595      * 00      abort
4596      */
4597     status = desc_status(info->rbufs[end]);
4598 
4599     /* ignore CRC bit if not using CRC (bit is undefined) */
4600     if ((info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_NONE)
4601         status &= ~BIT1;
4602 
4603     if (framesize == 0 ||
4604          (addr_field != 0xff && addr_field != info->params.addr_filter)) {
4605         free_rbufs(info, start, end);
4606         goto check_again;
4607     }
4608 
4609     if (framesize < (2 + crc_size) || status & BIT0) {
4610         info->icount.rxshort++;
4611         framesize = 0;
4612     } else if (status & BIT1) {
4613         info->icount.rxcrc++;
4614         if (!(info->params.crc_type & HDLC_CRC_RETURN_EX))
4615             framesize = 0;
4616     }
4617 
4618 #if SYNCLINK_GENERIC_HDLC
4619     if (framesize == 0) {
4620         info->netdev->stats.rx_errors++;
4621         info->netdev->stats.rx_frame_errors++;
4622     }
4623 #endif
4624 
4625     DBGBH(("%s rx frame status=%04X size=%d\n",
4626         info->device_name, status, framesize));
4627     DBGDATA(info, info->rbufs[start].buf, min_t(int, framesize, info->rbuf_fill_level), "rx");
4628 
4629     if (framesize) {
4630         if (!(info->params.crc_type & HDLC_CRC_RETURN_EX)) {
4631             framesize -= crc_size;
4632             crc_size = 0;
4633         }
4634 
4635         if (framesize > info->max_frame_size + crc_size)
4636             info->icount.rxlong++;
4637         else {
4638             /* copy dma buffer(s) to contiguous temp buffer */
4639             int copy_count = framesize;
4640             int i = start;
4641             unsigned char *p = info->tmp_rbuf;
4642             info->tmp_rbuf_count = framesize;
4643 
4644             info->icount.rxok++;
4645 
4646             while(copy_count) {
4647                 int partial_count = min_t(int, copy_count, info->rbuf_fill_level);
4648                 memcpy(p, info->rbufs[i].buf, partial_count);
4649                 p += partial_count;
4650                 copy_count -= partial_count;
4651                 if (++i == info->rbuf_count)
4652                     i = 0;
4653             }
4654 
4655             if (info->params.crc_type & HDLC_CRC_RETURN_EX) {
4656                 *p = (status & BIT1) ? RX_CRC_ERROR : RX_OK;
4657                 framesize++;
4658             }
4659 
4660 #if SYNCLINK_GENERIC_HDLC
4661             if (info->netcount)
4662                 hdlcdev_rx(info,info->tmp_rbuf, framesize);
4663             else
4664 #endif
4665                 ldisc_receive_buf(tty, info->tmp_rbuf, info->flag_buf, framesize);
4666         }
4667     }
4668     free_rbufs(info, start, end);
4669     return true;
4670 
4671 cleanup:
4672     return false;
4673 }
4674 
4675 /*
4676  * pass receive buffer (RAW synchronous mode) to tty layer
4677  * return true if buffer available, otherwise false
4678  */
4679 static bool rx_get_buf(struct slgt_info *info)
4680 {
4681     unsigned int i = info->rbuf_current;
4682     unsigned int count;
4683 
4684     if (!desc_complete(info->rbufs[i]))
4685         return false;
4686     count = desc_count(info->rbufs[i]);
4687     switch(info->params.mode) {
4688     case MGSL_MODE_MONOSYNC:
4689     case MGSL_MODE_BISYNC:
4690     case MGSL_MODE_XSYNC:
4691         /* ignore residue in byte synchronous modes */
4692         if (desc_residue(info->rbufs[i]))
4693             count--;
4694         break;
4695     }
4696     DBGDATA(info, info->rbufs[i].buf, count, "rx");
4697     DBGINFO(("rx_get_buf size=%d\n", count));
4698     if (count)
4699         ldisc_receive_buf(info->port.tty, info->rbufs[i].buf,
4700                   info->flag_buf, count);
4701     free_rbufs(info, i, i);
4702     return true;
4703 }
4704 
4705 static void reset_tbufs(struct slgt_info *info)
4706 {
4707     unsigned int i;
4708     info->tbuf_current = 0;
4709     for (i=0 ; i < info->tbuf_count ; i++) {
4710         info->tbufs[i].status = 0;
4711         info->tbufs[i].count  = 0;
4712     }
4713 }
4714 
4715 /*
4716  * return number of free transmit DMA buffers
4717  */
4718 static unsigned int free_tbuf_count(struct slgt_info *info)
4719 {
4720     unsigned int count = 0;
4721     unsigned int i = info->tbuf_current;
4722 
4723     do
4724     {
4725         if (desc_count(info->tbufs[i]))
4726             break; /* buffer in use */
4727         ++count;
4728         if (++i == info->tbuf_count)
4729             i=0;
4730     } while (i != info->tbuf_current);
4731 
4732     /* if tx DMA active, last zero count buffer is in use */
4733     if (count && (rd_reg32(info, TDCSR) & BIT0))
4734         --count;
4735 
4736     return count;
4737 }
4738 
4739 /*
4740  * return number of bytes in unsent transmit DMA buffers
4741  * and the serial controller tx FIFO
4742  */
4743 static unsigned int tbuf_bytes(struct slgt_info *info)
4744 {
4745     unsigned int total_count = 0;
4746     unsigned int i = info->tbuf_current;
4747     unsigned int reg_value;
4748     unsigned int count;
4749     unsigned int active_buf_count = 0;
4750 
4751     /*
4752      * Add descriptor counts for all tx DMA buffers.
4753      * If count is zero (cleared by DMA controller after read),
4754      * the buffer is complete or is actively being read from.
4755      *
4756      * Record buf_count of last buffer with zero count starting
4757      * from current ring position. buf_count is mirror
4758      * copy of count and is not cleared by serial controller.
4759      * If DMA controller is active, that buffer is actively
4760      * being read so add to total.
4761      */
4762     do {
4763         count = desc_count(info->tbufs[i]);
4764         if (count)
4765             total_count += count;
4766         else if (!total_count)
4767             active_buf_count = info->tbufs[i].buf_count;
4768         if (++i == info->tbuf_count)
4769             i = 0;
4770     } while (i != info->tbuf_current);
4771 
4772     /* read tx DMA status register */
4773     reg_value = rd_reg32(info, TDCSR);
4774 
4775     /* if tx DMA active, last zero count buffer is in use */
4776     if (reg_value & BIT0)
4777         total_count += active_buf_count;
4778 
4779     /* add tx FIFO count = reg_value[15..8] */
4780     total_count += (reg_value >> 8) & 0xff;
4781 
4782     /* if transmitter active add one byte for shift register */
4783     if (info->tx_active)
4784         total_count++;
4785 
4786     return total_count;
4787 }
4788 
4789 /*
4790  * load data into transmit DMA buffer ring and start transmitter if needed
4791  * return true if data accepted, otherwise false (buffers full)
4792  */
4793 static bool tx_load(struct slgt_info *info, const char *buf, unsigned int size)
4794 {
4795     unsigned short count;
4796     unsigned int i;
4797     struct slgt_desc *d;
4798 
4799     /* check required buffer space */
4800     if (DIV_ROUND_UP(size, DMABUFSIZE) > free_tbuf_count(info))
4801         return false;
4802 
4803     DBGDATA(info, buf, size, "tx");
4804 
4805     /*
4806      * copy data to one or more DMA buffers in circular ring
4807      * tbuf_start   = first buffer for this data
4808      * tbuf_current = next free buffer
4809      *
4810      * Copy all data before making data visible to DMA controller by
4811      * setting descriptor count of the first buffer.
4812      * This prevents an active DMA controller from reading the first DMA
4813      * buffers of a frame and stopping before the final buffers are filled.
4814      */
4815 
4816     info->tbuf_start = i = info->tbuf_current;
4817 
4818     while (size) {
4819         d = &info->tbufs[i];
4820 
4821         count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size);
4822         memcpy(d->buf, buf, count);
4823 
4824         size -= count;
4825         buf  += count;
4826 
4827         /*
4828          * set EOF bit for last buffer of HDLC frame or
4829          * for every buffer in raw mode
4830          */
4831         if ((!size && info->params.mode == MGSL_MODE_HDLC) ||
4832             info->params.mode == MGSL_MODE_RAW)
4833             set_desc_eof(*d, 1);
4834         else
4835             set_desc_eof(*d, 0);
4836 
4837         /* set descriptor count for all but first buffer */
4838         if (i != info->tbuf_start)
4839             set_desc_count(*d, count);
4840         d->buf_count = count;
4841 
4842         if (++i == info->tbuf_count)
4843             i = 0;
4844     }
4845 
4846     info->tbuf_current = i;
4847 
4848     /* set first buffer count to make new data visible to DMA controller */
4849     d = &info->tbufs[info->tbuf_start];
4850     set_desc_count(*d, d->buf_count);
4851 
4852     /* start transmitter if needed and update transmit timeout */
4853     if (!info->tx_active)
4854         tx_start(info);
4855     update_tx_timer(info);
4856 
4857     return true;
4858 }
4859 
4860 static int register_test(struct slgt_info *info)
4861 {
4862     static unsigned short patterns[] =
4863         {0x0000, 0xffff, 0xaaaa, 0x5555, 0x6969, 0x9696};
4864     static unsigned int count = ARRAY_SIZE(patterns);
4865     unsigned int i;
4866     int rc = 0;
4867 
4868     for (i=0 ; i < count ; i++) {
4869         wr_reg16(info, TIR, patterns[i]);
4870         wr_reg16(info, BDR, patterns[(i+1)%count]);
4871         if ((rd_reg16(info, TIR) != patterns[i]) ||
4872             (rd_reg16(info, BDR) != patterns[(i+1)%count])) {
4873             rc = -ENODEV;
4874             break;
4875         }
4876     }
4877     info->gpio_present = (rd_reg32(info, JCR) & BIT5) ? 1 : 0;
4878     info->init_error = rc ? 0 : DiagStatus_AddressFailure;
4879     return rc;
4880 }
4881 
4882 static int irq_test(struct slgt_info *info)
4883 {
4884     unsigned long timeout;
4885     unsigned long flags;
4886     struct tty_struct *oldtty = info->port.tty;
4887     u32 speed = info->params.data_rate;
4888 
4889     info->params.data_rate = 921600;
4890     info->port.tty = NULL;
4891 
4892     spin_lock_irqsave(&info->lock, flags);
4893     async_mode(info);
4894     slgt_irq_on(info, IRQ_TXIDLE);
4895 
4896     /* enable transmitter */
4897     wr_reg16(info, TCR,
4898         (unsigned short)(rd_reg16(info, TCR) | BIT1));
4899 
4900     /* write one byte and wait for tx idle */
4901     wr_reg16(info, TDR, 0);
4902 
4903     /* assume failure */
4904     info->init_error = DiagStatus_IrqFailure;
4905     info->irq_occurred = false;
4906 
4907     spin_unlock_irqrestore(&info->lock, flags);
4908 
4909     timeout=100;
4910     while(timeout-- && !info->irq_occurred)
4911         msleep_interruptible(10);
4912 
4913     spin_lock_irqsave(&info->lock,flags);
4914     reset_port(info);
4915     spin_unlock_irqrestore(&info->lock,flags);
4916 
4917     info->params.data_rate = speed;
4918     info->port.tty = oldtty;
4919 
4920     info->init_error = info->irq_occurred ? 0 : DiagStatus_IrqFailure;
4921     return info->irq_occurred ? 0 : -ENODEV;
4922 }
4923 
4924 static int loopback_test_rx(struct slgt_info *info)
4925 {
4926     unsigned char *src, *dest;
4927     int count;
4928 
4929     if (desc_complete(info->rbufs[0])) {
4930         count = desc_count(info->rbufs[0]);
4931         src   = info->rbufs[0].buf;
4932         dest  = info->tmp_rbuf;
4933 
4934         for( ; count ; count-=2, src+=2) {
4935             /* src=data byte (src+1)=status byte */
4936             if (!(*(src+1) & (BIT9 + BIT8))) {
4937                 *dest = *src;
4938                 dest++;
4939                 info->tmp_rbuf_count++;
4940             }
4941         }
4942         DBGDATA(info, info->tmp_rbuf, info->tmp_rbuf_count, "rx");
4943         return 1;
4944     }
4945     return 0;
4946 }
4947 
4948 static int loopback_test(struct slgt_info *info)
4949 {
4950 #define TESTFRAMESIZE 20
4951 
4952     unsigned long timeout;
4953     u16 count;
4954     unsigned char buf[TESTFRAMESIZE];
4955     int rc = -ENODEV;
4956     unsigned long flags;
4957 
4958     struct tty_struct *oldtty = info->port.tty;
4959     MGSL_PARAMS params;
4960 
4961     memcpy(&params, &info->params, sizeof(params));
4962 
4963     info->params.mode = MGSL_MODE_ASYNC;
4964     info->params.data_rate = 921600;
4965     info->params.loopback = 1;
4966     info->port.tty = NULL;
4967 
4968     /* build and send transmit frame */
4969     for (count = 0; count < TESTFRAMESIZE; ++count)
4970         buf[count] = (unsigned char)count;
4971 
4972     info->tmp_rbuf_count = 0;
4973     memset(info->tmp_rbuf, 0, TESTFRAMESIZE);
4974 
4975     /* program hardware for HDLC and enabled receiver */
4976     spin_lock_irqsave(&info->lock,flags);
4977     async_mode(info);
4978     rx_start(info);
4979     tx_load(info, buf, count);
4980     spin_unlock_irqrestore(&info->lock, flags);
4981 
4982     /* wait for receive complete */
4983     for (timeout = 100; timeout; --timeout) {
4984         msleep_interruptible(10);
4985         if (loopback_test_rx(info)) {
4986             rc = 0;
4987             break;
4988         }
4989     }
4990 
4991     /* verify received frame length and contents */
4992     if (!rc && (info->tmp_rbuf_count != count ||
4993           memcmp(buf, info->tmp_rbuf, count))) {
4994         rc = -ENODEV;
4995     }
4996 
4997     spin_lock_irqsave(&info->lock,flags);
4998     reset_adapter(info);
4999     spin_unlock_irqrestore(&info->lock,flags);
5000 
5001     memcpy(&info->params, &params, sizeof(info->params));
5002     info->port.tty = oldtty;
5003 
5004     info->init_error = rc ? DiagStatus_DmaFailure : 0;
5005     return rc;
5006 }
5007 
5008 static int adapter_test(struct slgt_info *info)
5009 {
5010     DBGINFO(("testing %s\n", info->device_name));
5011     if (register_test(info) < 0) {
5012         printk("register test failure %s addr=%08X\n",
5013             info->device_name, info->phys_reg_addr);
5014     } else if (irq_test(info) < 0) {
5015         printk("IRQ test failure %s IRQ=%d\n",
5016             info->device_name, info->irq_level);
5017     } else if (loopback_test(info) < 0) {
5018         printk("loopback test failure %s\n", info->device_name);
5019     }
5020     return info->init_error;
5021 }
5022 
5023 /*
5024  * transmit timeout handler
5025  */
5026 static void tx_timeout(struct timer_list *t)
5027 {
5028     struct slgt_info *info = from_timer(info, t, tx_timer);
5029     unsigned long flags;
5030 
5031     DBGINFO(("%s tx_timeout\n", info->device_name));
5032     if(info->tx_active && info->params.mode == MGSL_MODE_HDLC) {
5033         info->icount.txtimeout++;
5034     }
5035     spin_lock_irqsave(&info->lock,flags);
5036     tx_stop(info);
5037     spin_unlock_irqrestore(&info->lock,flags);
5038 
5039 #if SYNCLINK_GENERIC_HDLC
5040     if (info->netcount)
5041         hdlcdev_tx_done(info);
5042     else
5043 #endif
5044         bh_transmit(info);
5045 }
5046 
5047 /*
5048  * receive buffer polling timer
5049  */
5050 static void rx_timeout(struct timer_list *t)
5051 {
5052     struct slgt_info *info = from_timer(info, t, rx_timer);
5053     unsigned long flags;
5054 
5055     DBGINFO(("%s rx_timeout\n", info->device_name));
5056     spin_lock_irqsave(&info->lock, flags);
5057     info->pending_bh |= BH_RECEIVE;
5058     spin_unlock_irqrestore(&info->lock, flags);
5059     bh_handler(&info->task);
5060 }
5061