Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0+
0002 /*
0003  * Driver for Motorola/Freescale IMX serial ports
0004  *
0005  * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
0006  *
0007  * Author: Sascha Hauer <sascha@saschahauer.de>
0008  * Copyright (C) 2004 Pengutronix
0009  */
0010 
0011 #include <linux/module.h>
0012 #include <linux/ioport.h>
0013 #include <linux/init.h>
0014 #include <linux/console.h>
0015 #include <linux/sysrq.h>
0016 #include <linux/platform_device.h>
0017 #include <linux/tty.h>
0018 #include <linux/tty_flip.h>
0019 #include <linux/serial_core.h>
0020 #include <linux/serial.h>
0021 #include <linux/clk.h>
0022 #include <linux/delay.h>
0023 #include <linux/ktime.h>
0024 #include <linux/pinctrl/consumer.h>
0025 #include <linux/rational.h>
0026 #include <linux/slab.h>
0027 #include <linux/of.h>
0028 #include <linux/of_device.h>
0029 #include <linux/io.h>
0030 #include <linux/dma-mapping.h>
0031 
0032 #include <asm/irq.h>
0033 #include <linux/dma/imx-dma.h>
0034 
0035 #include "serial_mctrl_gpio.h"
0036 
0037 /* Register definitions */
0038 #define URXD0 0x0  /* Receiver Register */
0039 #define URTX0 0x40 /* Transmitter Register */
0040 #define UCR1  0x80 /* Control Register 1 */
0041 #define UCR2  0x84 /* Control Register 2 */
0042 #define UCR3  0x88 /* Control Register 3 */
0043 #define UCR4  0x8c /* Control Register 4 */
0044 #define UFCR  0x90 /* FIFO Control Register */
0045 #define USR1  0x94 /* Status Register 1 */
0046 #define USR2  0x98 /* Status Register 2 */
0047 #define UESC  0x9c /* Escape Character Register */
0048 #define UTIM  0xa0 /* Escape Timer Register */
0049 #define UBIR  0xa4 /* BRM Incremental Register */
0050 #define UBMR  0xa8 /* BRM Modulator Register */
0051 #define UBRC  0xac /* Baud Rate Count Register */
0052 #define IMX21_ONEMS 0xb0 /* One Millisecond register */
0053 #define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */
0054 #define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/
0055 
0056 /* UART Control Register Bit Fields.*/
0057 #define URXD_DUMMY_READ (1<<16)
0058 #define URXD_CHARRDY    (1<<15)
0059 #define URXD_ERR    (1<<14)
0060 #define URXD_OVRRUN (1<<13)
0061 #define URXD_FRMERR (1<<12)
0062 #define URXD_BRK    (1<<11)
0063 #define URXD_PRERR  (1<<10)
0064 #define URXD_RX_DATA    (0xFF<<0)
0065 #define UCR1_ADEN   (1<<15) /* Auto detect interrupt */
0066 #define UCR1_ADBR   (1<<14) /* Auto detect baud rate */
0067 #define UCR1_TRDYEN (1<<13) /* Transmitter ready interrupt enable */
0068 #define UCR1_IDEN   (1<<12) /* Idle condition interrupt */
0069 #define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */
0070 #define UCR1_RRDYEN (1<<9)  /* Recv ready interrupt enable */
0071 #define UCR1_RXDMAEN    (1<<8)  /* Recv ready DMA enable */
0072 #define UCR1_IREN   (1<<7)  /* Infrared interface enable */
#define UCR1_TXMPTYEN   (1<<6)  /* Transmitter empty interrupt enable */
0074 #define UCR1_RTSDEN (1<<5)  /* RTS delta interrupt enable */
0075 #define UCR1_SNDBRK (1<<4)  /* Send break */
0076 #define UCR1_TXDMAEN    (1<<3)  /* Transmitter ready DMA enable */
0077 #define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
0078 #define UCR1_ATDMAEN    (1<<2)  /* Aging DMA Timer Enable */
0079 #define UCR1_DOZE   (1<<1)  /* Doze */
0080 #define UCR1_UARTEN (1<<0)  /* UART enabled */
0081 #define UCR2_ESCI   (1<<15) /* Escape seq interrupt enable */
0082 #define UCR2_IRTS   (1<<14) /* Ignore RTS pin */
0083 #define UCR2_CTSC   (1<<13) /* CTS pin control */
0084 #define UCR2_CTS    (1<<12) /* Clear to send */
0085 #define UCR2_ESCEN  (1<<11) /* Escape enable */
0086 #define UCR2_PREN   (1<<8)  /* Parity enable */
0087 #define UCR2_PROE   (1<<7)  /* Parity odd/even */
0088 #define UCR2_STPB   (1<<6)  /* Stop */
0089 #define UCR2_WS     (1<<5)  /* Word size */
0090 #define UCR2_RTSEN  (1<<4)  /* Request to send interrupt enable */
0091 #define UCR2_ATEN   (1<<3)  /* Aging Timer Enable */
0092 #define UCR2_TXEN   (1<<2)  /* Transmitter enabled */
0093 #define UCR2_RXEN   (1<<1)  /* Receiver enabled */
0094 #define UCR2_SRST   (1<<0)  /* SW reset */
0095 #define UCR3_DTREN  (1<<13) /* DTR interrupt enable */
0096 #define UCR3_PARERREN   (1<<12) /* Parity enable */
0097 #define UCR3_FRAERREN   (1<<11) /* Frame error interrupt enable */
0098 #define UCR3_DSR    (1<<10) /* Data set ready */
0099 #define UCR3_DCD    (1<<9)  /* Data carrier detect */
0100 #define UCR3_RI     (1<<8)  /* Ring indicator */
0101 #define UCR3_ADNIMP (1<<7)  /* Autobaud Detection Not Improved */
0102 #define UCR3_RXDSEN (1<<6)  /* Receive status interrupt enable */
0103 #define UCR3_AIRINTEN   (1<<5)  /* Async IR wake interrupt enable */
0104 #define UCR3_AWAKEN (1<<4)  /* Async wake interrupt enable */
0105 #define UCR3_DTRDEN (1<<3)  /* Data Terminal Ready Delta Enable. */
0106 #define IMX21_UCR3_RXDMUXSEL    (1<<2)  /* RXD Muxed Input Select */
0107 #define UCR3_INVT   (1<<1)  /* Inverted Infrared transmission */
0108 #define UCR3_BPEN   (1<<0)  /* Preset registers enable */
0109 #define UCR4_CTSTL_SHF  10  /* CTS trigger level shift */
0110 #define UCR4_CTSTL_MASK 0x3F    /* CTS trigger is 6 bits wide */
0111 #define UCR4_INVR   (1<<9)  /* Inverted infrared reception */
0112 #define UCR4_ENIRI  (1<<8)  /* Serial infrared interrupt enable */
0113 #define UCR4_WKEN   (1<<7)  /* Wake interrupt enable */
0114 #define UCR4_REF16  (1<<6)  /* Ref freq 16 MHz */
0115 #define UCR4_IDDMAEN    (1<<6)  /* DMA IDLE Condition Detected */
0116 #define UCR4_IRSC   (1<<5)  /* IR special case */
0117 #define UCR4_TCEN   (1<<3)  /* Transmit complete interrupt enable */
0118 #define UCR4_BKEN   (1<<2)  /* Break condition interrupt enable */
0119 #define UCR4_OREN   (1<<1)  /* Receiver overrun interrupt enable */
0120 #define UCR4_DREN   (1<<0)  /* Recv data ready interrupt enable */
0121 #define UFCR_RXTL_SHF   0   /* Receiver trigger level shift */
0122 #define UFCR_DCEDTE (1<<6)  /* DCE/DTE mode select */
0123 #define UFCR_RFDIV  (7<<7)  /* Reference freq divider mask */
0124 #define UFCR_RFDIV_REG(x)   (((x) < 7 ? 6 - (x) : 6) << 7)
0125 #define UFCR_TXTL_SHF   10  /* Transmitter trigger level shift */
0126 #define USR1_PARITYERR  (1<<15) /* Parity error interrupt flag */
0127 #define USR1_RTSS   (1<<14) /* RTS pin status */
0128 #define USR1_TRDY   (1<<13) /* Transmitter ready interrupt/dma flag */
0129 #define USR1_RTSD   (1<<12) /* RTS delta */
0130 #define USR1_ESCF   (1<<11) /* Escape seq interrupt flag */
0131 #define USR1_FRAMERR    (1<<10) /* Frame error interrupt flag */
0132 #define USR1_RRDY   (1<<9)   /* Receiver ready interrupt/dma flag */
0133 #define USR1_AGTIM  (1<<8)   /* Ageing timer interrupt flag */
0134 #define USR1_DTRD   (1<<7)   /* DTR Delta */
0135 #define USR1_RXDS    (1<<6)  /* Receiver idle interrupt flag */
0136 #define USR1_AIRINT  (1<<5)  /* Async IR wake interrupt flag */
#define USR1_AWAKE   (1<<4)  /* Async wake interrupt flag */
0138 #define USR2_ADET    (1<<15) /* Auto baud rate detect complete */
0139 #define USR2_TXFE    (1<<14) /* Transmit buffer FIFO empty */
0140 #define USR2_DTRF    (1<<13) /* DTR edge interrupt flag */
0141 #define USR2_IDLE    (1<<12) /* Idle condition */
0142 #define USR2_RIDELT  (1<<10) /* Ring Interrupt Delta */
0143 #define USR2_RIIN    (1<<9)  /* Ring Indicator Input */
0144 #define USR2_IRINT   (1<<8)  /* Serial infrared interrupt flag */
0145 #define USR2_WAKE    (1<<7)  /* Wake */
0146 #define USR2_DCDIN   (1<<5)  /* Data Carrier Detect Input */
0147 #define USR2_RTSF    (1<<4)  /* RTS edge interrupt flag */
0148 #define USR2_TXDC    (1<<3)  /* Transmitter complete */
0149 #define USR2_BRCD    (1<<2)  /* Break condition */
0150 #define USR2_ORE    (1<<1)   /* Overrun error */
0151 #define USR2_RDR    (1<<0)   /* Recv data ready */
0152 #define UTS_FRCPERR (1<<13) /* Force parity error */
0153 #define UTS_LOOP    (1<<12)  /* Loop tx and rx */
0154 #define UTS_TXEMPTY  (1<<6)  /* TxFIFO empty */
0155 #define UTS_RXEMPTY  (1<<5)  /* RxFIFO empty */
0156 #define UTS_TXFULL   (1<<4)  /* TxFIFO full */
0157 #define UTS_RXFULL   (1<<3)  /* RxFIFO full */
0158 #define UTS_SOFTRST  (1<<0)  /* Software reset */
0159 
0160 /* We've been assigned a range on the "Low-density serial ports" major */
0161 #define SERIAL_IMX_MAJOR    207
0162 #define MINOR_START     16
0163 #define DEV_NAME        "ttymxc"
0164 
0165 /*
0166  * This determines how often we check the modem status signals
0167  * for any change.  They generally aren't connected to an IRQ
0168  * so we have to poll them.  We also check immediately before
0169  * filling the TX fifo incase CTS has been dropped.
0170  */
0171 #define MCTRL_TIMEOUT   (250*HZ/1000)
0172 
0173 #define DRIVER_NAME "IMX-uart"
0174 
0175 #define UART_NR 8
0176 
/* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */
enum imx_uart_type {
	IMX1_UART,	/* i.MX1: UTS register lives at IMX1_UTS */
	IMX21_UART,	/* i.MX21-compatible layout (UTS at IMX21_UTS) */
	IMX53_UART,
	IMX6Q_UART,
};
0184 
/* device type dependent stuff */
struct imx_uart_data {
	unsigned uts_reg;		/* offset of the UART Test Register (IMX1_UTS or IMX21_UTS) */
	enum imx_uart_type devtype;	/* which i.MX UART flavour this describes */
};
0190 
/*
 * Software state of the (RS485) transmitter, driven by imx_uart_start_tx()
 * and imx_uart_stop_tx() together with the trigger_start_tx/trigger_stop_tx
 * hrtimers that implement the configured RTS delays.
 */
enum imx_tx_state {
	OFF,		/* transmitter idle, receiver (re)enabled */
	WAIT_AFTER_RTS,	/* RTS switched, waiting out delay_rts_before_send */
	SEND,		/* actively transmitting */
	WAIT_AFTER_SEND,	/* data sent, waiting out delay_rts_after_send */
};
0197 
struct imx_port {
	struct uart_port	port;	/* must stay first: code casts uart_port * to imx_port * */
	struct timer_list	timer;	/* periodic modem-status poll (see MCTRL_TIMEOUT) */
	unsigned int		old_status;	/* last hwmctrl sample, for change detection */
	unsigned int		have_rtscts:1;
	unsigned int		have_rtsgpio:1;
	unsigned int		dte_mode:1;	/* DTE pinout; enables RI reporting in get_hwmctrl */
	unsigned int		inverted_tx:1;
	unsigned int		inverted_rx:1;
	struct clk		*clk_ipg;
	struct clk		*clk_per;
	const struct imx_uart_data *devdata;	/* per-SoC UTS offset and devtype */

	struct mctrl_gpios *gpios;

	/*
	 * shadow registers: cached copies of the control registers, kept
	 * in sync by imx_uart_writel()/imx_uart_readl() so reads can be
	 * served without touching the hardware.
	 */
	unsigned int ucr1;
	unsigned int ucr2;
	unsigned int ucr3;
	unsigned int ucr4;
	unsigned int ufcr;

	/* DMA fields */
	unsigned int		dma_is_enabled:1;
	unsigned int		dma_is_rxing:1;
	unsigned int		dma_is_txing:1;	/* a TX DMA transfer is in flight */
	struct dma_chan		*dma_chan_rx, *dma_chan_tx;
	struct scatterlist	rx_sgl, tx_sgl[2];	/* TX needs 2 entries when xmit wraps */
	void			*rx_buf;
	struct circ_buf		rx_ring;
	unsigned int		rx_buf_size;
	unsigned int		rx_period_length;
	unsigned int		rx_periods;
	dma_cookie_t		rx_cookie;
	unsigned int		tx_bytes;	/* bytes covered by the current TX DMA */
	unsigned int		dma_tx_nents;	/* sg entries used for the current TX DMA */
	unsigned int		saved_reg[10];	/* register save area — presumably suspend/resume; confirm against the save/restore code */
	bool			context_saved;

	enum imx_tx_state	tx_state;	/* RS485 TX state machine, see enum imx_tx_state */
	struct hrtimer		trigger_start_tx;	/* fires after delay_rts_before_send */
	struct hrtimer		trigger_stop_tx;	/* fires after delay_rts_after_send */
};
0241 
/*
 * Snapshot of UCR1..UCR3, used by imx_uart_ucrs_save()/_restore()
 * (compiled under CONFIG_SERIAL_IMX_CONSOLE).
 */
struct imx_port_ucrs {
	unsigned int	ucr1;
	unsigned int	ucr2;
	unsigned int	ucr3;
};
0247 
/*
 * Per-devtype data, indexed by enum imx_uart_type and referenced from
 * the device-tree match table below. Only i.MX1 has a different UTS offset.
 */
static struct imx_uart_data imx_uart_devdata[] = {
	[IMX1_UART] = {
		.uts_reg = IMX1_UTS,
		.devtype = IMX1_UART,
	},
	[IMX21_UART] = {
		.uts_reg = IMX21_UTS,
		.devtype = IMX21_UART,
	},
	[IMX53_UART] = {
		.uts_reg = IMX21_UTS,
		.devtype = IMX53_UART,
	},
	[IMX6Q_UART] = {
		.uts_reg = IMX21_UTS,
		.devtype = IMX6Q_UART,
	},
};
0266 
/* Device-tree match table; each entry points at its devdata above. */
static const struct of_device_id imx_uart_dt_ids[] = {
	{ .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], },
	{ .compatible = "fsl,imx53-uart", .data = &imx_uart_devdata[IMX53_UART], },
	{ .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
	{ .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_uart_dt_ids);
0275 
0276 static void imx_uart_writel(struct imx_port *sport, u32 val, u32 offset)
0277 {
0278     switch (offset) {
0279     case UCR1:
0280         sport->ucr1 = val;
0281         break;
0282     case UCR2:
0283         sport->ucr2 = val;
0284         break;
0285     case UCR3:
0286         sport->ucr3 = val;
0287         break;
0288     case UCR4:
0289         sport->ucr4 = val;
0290         break;
0291     case UFCR:
0292         sport->ufcr = val;
0293         break;
0294     default:
0295         break;
0296     }
0297     writel(val, sport->port.membase + offset);
0298 }
0299 
0300 static u32 imx_uart_readl(struct imx_port *sport, u32 offset)
0301 {
0302     switch (offset) {
0303     case UCR1:
0304         return sport->ucr1;
0305         break;
0306     case UCR2:
0307         /*
0308          * UCR2_SRST is the only bit in the cached registers that might
0309          * differ from the value that was last written. As it only
0310          * automatically becomes one after being cleared, reread
0311          * conditionally.
0312          */
0313         if (!(sport->ucr2 & UCR2_SRST))
0314             sport->ucr2 = readl(sport->port.membase + offset);
0315         return sport->ucr2;
0316         break;
0317     case UCR3:
0318         return sport->ucr3;
0319         break;
0320     case UCR4:
0321         return sport->ucr4;
0322         break;
0323     case UFCR:
0324         return sport->ufcr;
0325         break;
0326     default:
0327         return readl(sport->port.membase + offset);
0328     }
0329 }
0330 
/* Offset of the UART Test Register, which differs between i.MX1 and later SoCs. */
static inline unsigned imx_uart_uts_reg(struct imx_port *sport)
{
	return sport->devdata->uts_reg;
}
0335 
/* True iff this port is the i.MX1 variant of the UART. */
static inline int imx_uart_is_imx1(struct imx_port *sport)
{
	return sport->devdata->devtype == IMX1_UART;
}
0340 
/* True iff this port is the i.MX21 variant of the UART. */
static inline int imx_uart_is_imx21(struct imx_port *sport)
{
	return sport->devdata->devtype == IMX21_UART;
}
0345 
/* True iff this port is the i.MX53 variant of the UART. */
static inline int imx_uart_is_imx53(struct imx_port *sport)
{
	return sport->devdata->devtype == IMX53_UART;
}
0350 
/* True iff this port is the i.MX6Q variant of the UART. */
static inline int imx_uart_is_imx6q(struct imx_port *sport)
{
	return sport->devdata->devtype == IMX6Q_UART;
}
0355 /*
0356  * Save and restore functions for UCR1, UCR2 and UCR3 registers
0357  */
0358 #if IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE)
/* Snapshot UCR1..UCR3 into @ucr so imx_uart_ucrs_restore() can put them back. */
static void imx_uart_ucrs_save(struct imx_port *sport,
			       struct imx_port_ucrs *ucr)
{
	/* save control registers */
	ucr->ucr1 = imx_uart_readl(sport, UCR1);
	ucr->ucr2 = imx_uart_readl(sport, UCR2);
	ucr->ucr3 = imx_uart_readl(sport, UCR3);
}
0367 
/* Write back the UCR1..UCR3 values captured by imx_uart_ucrs_save(). */
static void imx_uart_ucrs_restore(struct imx_port *sport,
				  struct imx_port_ucrs *ucr)
{
	/* restore control registers */
	imx_uart_writel(sport, ucr->ucr1, UCR1);
	imx_uart_writel(sport, ucr->ucr2, UCR2);
	imx_uart_writel(sport, ucr->ucr3, UCR3);
}
0376 #endif
0377 
/* called with port.lock taken and irqs caller dependent */
static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2)
{
	/*
	 * Take manual control of the RTS output: clear CTSC (automatic
	 * receiver-driven control off) and clear the CTS bit, which on this
	 * modem-side UART drives the RTS pin (pin naming is inverted — see
	 * the comment above imx_uart_get_hwmctrl()). Active level assumed
	 * to be CTS=0 here — confirm against the i.MX reference manual.
	 */
	*ucr2 &= ~(UCR2_CTSC | UCR2_CTS);

	/* Keep mctrl and any RTS GPIO in sync with the register state. */
	sport->port.mctrl |= TIOCM_RTS;
	mctrl_gpio_set(sport->gpios, sport->port.mctrl);
}
0386 
/* called with port.lock taken and irqs caller dependent */
static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2)
{
	/* Manual control (CTSC off) with CTS set: the inverse of rts_active. */
	*ucr2 &= ~UCR2_CTSC;
	*ucr2 |= UCR2_CTS;

	/* Keep mctrl and any RTS GPIO in sync with the register state. */
	sport->port.mctrl &= ~TIOCM_RTS;
	mctrl_gpio_set(sport->gpios, sport->port.mctrl);
}
0396 
0397 static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec)
0398 {
0399        hrtimer_start(hrt, ms_to_ktime(msec), HRTIMER_MODE_REL);
0400 }
0401 
0402 /* called with port.lock taken and irqs off */
0403 static void imx_uart_start_rx(struct uart_port *port)
0404 {
0405     struct imx_port *sport = (struct imx_port *)port;
0406     unsigned int ucr1, ucr2;
0407 
0408     ucr1 = imx_uart_readl(sport, UCR1);
0409     ucr2 = imx_uart_readl(sport, UCR2);
0410 
0411     ucr2 |= UCR2_RXEN;
0412 
0413     if (sport->dma_is_enabled) {
0414         ucr1 |= UCR1_RXDMAEN | UCR1_ATDMAEN;
0415     } else {
0416         ucr1 |= UCR1_RRDYEN;
0417         ucr2 |= UCR2_ATEN;
0418     }
0419 
0420     /* Write UCR2 first as it includes RXEN */
0421     imx_uart_writel(sport, ucr2, UCR2);
0422     imx_uart_writel(sport, ucr1, UCR1);
0423 }
0424 
/* called with port.lock taken and irqs off */
static void imx_uart_stop_tx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	u32 ucr1, ucr4, usr2;

	if (sport->tx_state == OFF)
		return;

	/*
	 * We are maybe in the SMP context, so if the DMA TX thread is running
	 * on other cpu, we have to wait for it to finish.
	 */
	if (sport->dma_is_txing)
		return;

	/* Mask the TX-ready interrupt; nothing more will be queued. */
	ucr1 = imx_uart_readl(sport, UCR1);
	imx_uart_writel(sport, ucr1 & ~UCR1_TRDYEN, UCR1);

	usr2 = imx_uart_readl(sport, USR2);
	if (!(usr2 & USR2_TXDC)) {
		/* The shifter is still busy, so retry once TC triggers */
		return;
	}

	/* Everything drained: the transmit-complete irq is no longer needed. */
	ucr4 = imx_uart_readl(sport, UCR4);
	ucr4 &= ~UCR4_TCEN;
	imx_uart_writel(sport, ucr4, UCR4);

	/* in rs485 mode disable transmitter */
	if (port->rs485.flags & SER_RS485_ENABLED) {
		if (sport->tx_state == SEND) {
			sport->tx_state = WAIT_AFTER_SEND;

			/* Honour the configured RTS hold time after sending. */
			if (port->rs485.delay_rts_after_send > 0) {
				start_hrtimer_ms(&sport->trigger_stop_tx,
					 port->rs485.delay_rts_after_send);
				return;
			}

			/* continue without any delay */
		}

		if (sport->tx_state == WAIT_AFTER_RTS ||
		    sport->tx_state == WAIT_AFTER_SEND) {
			u32 ucr2;

			/* A pending delayed start must not fire any more. */
			hrtimer_try_to_cancel(&sport->trigger_start_tx);

			/* Return RTS to its configured idle-after-send level. */
			ucr2 = imx_uart_readl(sport, UCR2);
			if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
				imx_uart_rts_active(sport, &ucr2);
			else
				imx_uart_rts_inactive(sport, &ucr2);
			imx_uart_writel(sport, ucr2, UCR2);

			/* Re-enable the receiver that start_tx may have stopped. */
			imx_uart_start_rx(port);

			sport->tx_state = OFF;
		}
	} else {
		sport->tx_state = OFF;
	}
}
0489 
/* called with port.lock taken and irqs off */
static void imx_uart_stop_rx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	u32 ucr1, ucr2, ucr4;

	ucr1 = imx_uart_readl(sport, UCR1);
	ucr2 = imx_uart_readl(sport, UCR2);
	ucr4 = imx_uart_readl(sport, UCR4);

	/* Disable whichever RX notification scheme is in use (DMA or PIO). */
	if (sport->dma_is_enabled) {
		ucr1 &= ~(UCR1_RXDMAEN | UCR1_ATDMAEN);
	} else {
		ucr1 &= ~UCR1_RRDYEN;
		ucr2 &= ~UCR2_ATEN;
		ucr4 &= ~UCR4_OREN;
	}
	imx_uart_writel(sport, ucr1, UCR1);
	imx_uart_writel(sport, ucr4, UCR4);

	/* Disable the receiver last, after its interrupt sources are off. */
	ucr2 &= ~UCR2_RXEN;
	imx_uart_writel(sport, ucr2, UCR2);
}
0513 
/* called with port.lock taken and irqs off */
static void imx_uart_enable_ms(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;

	/*
	 * Kick the polling timer immediately; the modem status lines
	 * generally have no IRQ of their own (see MCTRL_TIMEOUT above).
	 */
	mod_timer(&sport->timer, jiffies);

	mctrl_gpio_enable_ms(sport->gpios);
}
0523 
0524 static void imx_uart_dma_tx(struct imx_port *sport);
0525 
/* called with port.lock taken and irqs off */
static inline void imx_uart_transmit_buffer(struct imx_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;

	/* A pending x_char (flow-control char) always goes out first. */
	if (sport->port.x_char) {
		/* Send next char */
		imx_uart_writel(sport, sport->port.x_char, URTX0);
		sport->port.icount.tx++;
		sport->port.x_char = 0;
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
		imx_uart_stop_tx(&sport->port);
		return;
	}

	if (sport->dma_is_enabled) {
		u32 ucr1;
		/*
		 * With DMA enabled the TX interrupt is only used to push an
		 * x_char (handled above): mask it again and make sure TX DMA
		 * is (re)enabled, kicking off a new transfer if none runs.
		 */
		ucr1 = imx_uart_readl(sport, UCR1);
		ucr1 &= ~UCR1_TRDYEN;
		if (sport->dma_is_txing) {
			ucr1 |= UCR1_TXDMAEN;
			imx_uart_writel(sport, ucr1, UCR1);
		} else {
			imx_uart_writel(sport, ucr1, UCR1);
			imx_uart_dma_tx(sport);
		}

		return;
	}

	/* PIO path: feed the FIFO until it is full or we run out of data. */
	while (!uart_circ_empty(xmit) &&
	       !(imx_uart_readl(sport, imx_uart_uts_reg(sport)) & UTS_TXFULL)) {
		/* send xmit->buf[xmit->tail]
		 * out the port here */
		imx_uart_writel(sport, xmit->buf[xmit->tail], URTX0);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		sport->port.icount.tx++;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

	if (uart_circ_empty(xmit))
		imx_uart_stop_tx(&sport->port);
}
0578 
/* Completion callback for the TX DMA descriptor set up in imx_uart_dma_tx(). */
static void imx_uart_dma_tx_callback(void *data)
{
	struct imx_port *sport = data;
	struct scatterlist *sgl = &sport->tx_sgl[0];
	struct circ_buf *xmit = &sport->port.state->xmit;
	unsigned long flags;
	u32 ucr1;

	spin_lock_irqsave(&sport->port.lock, flags);

	dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);

	/* Transfer done: stop the UART from issuing TX DMA requests. */
	ucr1 = imx_uart_readl(sport, UCR1);
	ucr1 &= ~UCR1_TXDMAEN;
	imx_uart_writel(sport, ucr1, UCR1);

	/* update the stat */
	xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
	sport->port.icount.tx += sport->tx_bytes;

	dev_dbg(sport->port.dev, "we finish the TX DMA.\n");

	sport->dma_is_txing = 0;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

	/* More data queued meanwhile? Chain the next transfer directly. */
	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
		imx_uart_dma_tx(sport);
	else if (sport->port.rs485.flags & SER_RS485_ENABLED) {
		/*
		 * Nothing left to send: enable the transmit-complete irq so
		 * imx_uart_stop_tx() runs once the shifter has drained.
		 */
		u32 ucr4 = imx_uart_readl(sport, UCR4);
		ucr4 |= UCR4_TCEN;
		imx_uart_writel(sport, ucr4, UCR4);
	}

	spin_unlock_irqrestore(&sport->port.lock, flags);
}
0616 
0617 /* called with port.lock taken and irqs off */
0618 static void imx_uart_dma_tx(struct imx_port *sport)
0619 {
0620     struct circ_buf *xmit = &sport->port.state->xmit;
0621     struct scatterlist *sgl = sport->tx_sgl;
0622     struct dma_async_tx_descriptor *desc;
0623     struct dma_chan *chan = sport->dma_chan_tx;
0624     struct device *dev = sport->port.dev;
0625     u32 ucr1, ucr4;
0626     int ret;
0627 
0628     if (sport->dma_is_txing)
0629         return;
0630 
0631     ucr4 = imx_uart_readl(sport, UCR4);
0632     ucr4 &= ~UCR4_TCEN;
0633     imx_uart_writel(sport, ucr4, UCR4);
0634 
0635     sport->tx_bytes = uart_circ_chars_pending(xmit);
0636 
0637     if (xmit->tail < xmit->head || xmit->head == 0) {
0638         sport->dma_tx_nents = 1;
0639         sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
0640     } else {
0641         sport->dma_tx_nents = 2;
0642         sg_init_table(sgl, 2);
0643         sg_set_buf(sgl, xmit->buf + xmit->tail,
0644                 UART_XMIT_SIZE - xmit->tail);
0645         sg_set_buf(sgl + 1, xmit->buf, xmit->head);
0646     }
0647 
0648     ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
0649     if (ret == 0) {
0650         dev_err(dev, "DMA mapping error for TX.\n");
0651         return;
0652     }
0653     desc = dmaengine_prep_slave_sg(chan, sgl, ret,
0654                     DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
0655     if (!desc) {
0656         dma_unmap_sg(dev, sgl, sport->dma_tx_nents,
0657                  DMA_TO_DEVICE);
0658         dev_err(dev, "We cannot prepare for the TX slave dma!\n");
0659         return;
0660     }
0661     desc->callback = imx_uart_dma_tx_callback;
0662     desc->callback_param = sport;
0663 
0664     dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
0665             uart_circ_chars_pending(xmit));
0666 
0667     ucr1 = imx_uart_readl(sport, UCR1);
0668     ucr1 |= UCR1_TXDMAEN;
0669     imx_uart_writel(sport, ucr1, UCR1);
0670 
0671     /* fire it */
0672     sport->dma_is_txing = 1;
0673     dmaengine_submit(desc);
0674     dma_async_issue_pending(chan);
0675     return;
0676 }
0677 
/* called with port.lock taken and irqs off */
static void imx_uart_start_tx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	u32 ucr1;

	if (!sport->port.x_char && uart_circ_empty(&port->state->xmit))
		return;

	/*
	 * We cannot simply do nothing here if sport->tx_state == SEND already
	 * because UCR1_TXMPTYEN might already have been cleared in
	 * imx_uart_stop_tx(), but tx_state is still SEND.
	 */

	if (port->rs485.flags & SER_RS485_ENABLED) {
		if (sport->tx_state == OFF) {
			/* Drive RTS to the configured on-send level. */
			u32 ucr2 = imx_uart_readl(sport, UCR2);
			if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
				imx_uart_rts_active(sport, &ucr2);
			else
				imx_uart_rts_inactive(sport, &ucr2);
			imx_uart_writel(sport, ucr2, UCR2);

			/* Half-duplex: mute the receiver while we transmit. */
			if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
				imx_uart_stop_rx(port);

			sport->tx_state = WAIT_AFTER_RTS;

			/* Postpone the send until the RTS setup delay passed. */
			if (port->rs485.delay_rts_before_send > 0) {
				start_hrtimer_ms(&sport->trigger_start_tx,
					 port->rs485.delay_rts_before_send);
				return;
			}

			/* continue without any delay */
		}

		if (sport->tx_state == WAIT_AFTER_SEND
		    || sport->tx_state == WAIT_AFTER_RTS) {

			/* A pending delayed stop must not fire any more. */
			hrtimer_try_to_cancel(&sport->trigger_stop_tx);

			/*
			 * Enable transmitter and shifter empty irq only if DMA
			 * is off.  In the DMA case this is done in the
			 * tx-callback.
			 */
			if (!sport->dma_is_enabled) {
				u32 ucr4 = imx_uart_readl(sport, UCR4);
				ucr4 |= UCR4_TCEN;
				imx_uart_writel(sport, ucr4, UCR4);
			}

			sport->tx_state = SEND;
		}
	} else {
		sport->tx_state = SEND;
	}

	if (!sport->dma_is_enabled) {
		/* PIO: unmask the TX-ready irq to kick transmission. */
		ucr1 = imx_uart_readl(sport, UCR1);
		imx_uart_writel(sport, ucr1 | UCR1_TRDYEN, UCR1);
	}

	if (sport->dma_is_enabled) {
		if (sport->port.x_char) {
			/* We have X-char to send, so enable TX IRQ and
			 * disable TX DMA to let TX interrupt to send X-char */
			ucr1 = imx_uart_readl(sport, UCR1);
			ucr1 &= ~UCR1_TXDMAEN;
			ucr1 |= UCR1_TRDYEN;
			imx_uart_writel(sport, ucr1, UCR1);
			return;
		}

		if (!uart_circ_empty(&port->state->xmit) &&
		    !uart_tx_stopped(port))
			imx_uart_dma_tx(sport);
		return;
	}
}
0760 
/* RTS delta worker; caller holds the port lock. */
static irqreturn_t __imx_uart_rtsint(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	u32 usr1;

	/* Acknowledge the RTS delta event before sampling the new level. */
	imx_uart_writel(sport, USR1_RTSD, USR1);
	usr1 = imx_uart_readl(sport, USR1) & USR1_RTSS;
	/* Modem-side UART: our RTS input is reported as CTS to the core. */
	uart_handle_cts_change(&sport->port, !!usr1);
	wake_up_interruptible(&sport->port.state->port.delta_msr_wait);

	return IRQ_HANDLED;
}
0773 
0774 static irqreturn_t imx_uart_rtsint(int irq, void *dev_id)
0775 {
0776     struct imx_port *sport = dev_id;
0777     irqreturn_t ret;
0778 
0779     spin_lock(&sport->port.lock);
0780 
0781     ret = __imx_uart_rtsint(irq, dev_id);
0782 
0783     spin_unlock(&sport->port.lock);
0784 
0785     return ret;
0786 }
0787 
0788 static irqreturn_t imx_uart_txint(int irq, void *dev_id)
0789 {
0790     struct imx_port *sport = dev_id;
0791 
0792     spin_lock(&sport->port.lock);
0793     imx_uart_transmit_buffer(sport);
0794     spin_unlock(&sport->port.lock);
0795     return IRQ_HANDLED;
0796 }
0797 
/* RX worker: drain the RX FIFO into the tty layer; caller holds the port lock. */
static irqreturn_t __imx_uart_rxint(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	unsigned int rx, flg, ignored = 0;
	struct tty_port *port = &sport->port.state->port;

	/* USR2_RDR is set while at least one character sits in the FIFO. */
	while (imx_uart_readl(sport, USR2) & USR2_RDR) {
		u32 usr2;

		flg = TTY_NORMAL;
		sport->port.icount.rx++;

		rx = imx_uart_readl(sport, URXD0);

		usr2 = imx_uart_readl(sport, USR2);
		if (usr2 & USR2_BRCD) {
			/* Acknowledge the break condition before handling it. */
			imx_uart_writel(sport, USR2_BRCD, USR2);
			if (uart_handle_break(&sport->port))
				continue;
		}

		if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
			continue;

		if (unlikely(rx & URXD_ERR)) {
			/* Error flags arrive alongside the character in URXD. */
			if (rx & URXD_BRK)
				sport->port.icount.brk++;
			else if (rx & URXD_PRERR)
				sport->port.icount.parity++;
			else if (rx & URXD_FRMERR)
				sport->port.icount.frame++;
			if (rx & URXD_OVRRUN)
				sport->port.icount.overrun++;

			if (rx & sport->port.ignore_status_mask) {
				/* Don't spin forever on an error storm. */
				if (++ignored > 100)
					goto out;
				continue;
			}

			/* Keep only the status bits the caller cares about. */
			rx &= (sport->port.read_status_mask | 0xFF);

			if (rx & URXD_BRK)
				flg = TTY_BREAK;
			else if (rx & URXD_PRERR)
				flg = TTY_PARITY;
			else if (rx & URXD_FRMERR)
				flg = TTY_FRAME;
			if (rx & URXD_OVRRUN)
				flg = TTY_OVERRUN;

			sport->port.sysrq = 0;
		}

		/* DUMMY_READ mode: discard everything received. */
		if (sport->port.ignore_status_mask & URXD_DUMMY_READ)
			goto out;

		if (tty_insert_flip_char(port, rx, flg) == 0)
			sport->port.icount.buf_overrun++;
	}

out:
	tty_flip_buffer_push(port);

	return IRQ_HANDLED;
}
0864 
0865 static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
0866 {
0867     struct imx_port *sport = dev_id;
0868     irqreturn_t ret;
0869 
0870     spin_lock(&sport->port.lock);
0871 
0872     ret = __imx_uart_rxint(irq, dev_id);
0873 
0874     spin_unlock(&sport->port.lock);
0875 
0876     return ret;
0877 }
0878 
0879 static void imx_uart_clear_rx_errors(struct imx_port *sport);
0880 
/*
 * We have a modem side uart, so the meanings of RTS and CTS are inverted.
 */
static unsigned int imx_uart_get_hwmctrl(struct imx_port *sport)
{
	unsigned int tmp = TIOCM_DSR;	/* DSR is always reported as asserted */
	unsigned usr1 = imx_uart_readl(sport, USR1);
	unsigned usr2 = imx_uart_readl(sport, USR2);

	if (usr1 & USR1_RTSS)
		tmp |= TIOCM_CTS;

	/* in DCE mode DCDIN is always 0 */
	if (!(usr2 & USR2_DCDIN))
		tmp |= TIOCM_CAR;

	/*
	 * NOTE(review): USR2 is re-read here instead of reusing the cached
	 * value above — presumably to get a fresh RI sample; confirm before
	 * folding this into the earlier read.
	 */
	if (sport->dte_mode)
		if (!(imx_uart_readl(sport, USR2) & USR2_RIIN))
			tmp |= TIOCM_RI;

	return tmp;
}
0903 
0904 /*
0905  * Handle any change of modem status signal since we were last called.
0906  */
0907 static void imx_uart_mctrl_check(struct imx_port *sport)
0908 {
0909     unsigned int status, changed;
0910 
0911     status = imx_uart_get_hwmctrl(sport);
0912     changed = status ^ sport->old_status;
0913 
0914     if (changed == 0)
0915         return;
0916 
0917     sport->old_status = status;
0918 
0919     if (changed & TIOCM_RI && status & TIOCM_RI)
0920         sport->port.icount.rng++;
0921     if (changed & TIOCM_DSR)
0922         sport->port.icount.dsr++;
0923     if (changed & TIOCM_CAR)
0924         uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
0925     if (changed & TIOCM_CTS)
0926         uart_handle_cts_change(&sport->port, status & TIOCM_CTS);
0927 
0928     wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
0929 }
0930 
/*
 * Combined interrupt handler for SoCs where all UART events share one irq
 * line. Snapshots the status and control registers, masks out events whose
 * irq sources are disabled, then dispatches each pending event. Returns
 * IRQ_HANDLED if at least one event was serviced, IRQ_NONE otherwise.
 */
static irqreturn_t imx_uart_int(int irq, void *dev_id)
{
    struct imx_port *sport = dev_id;
    unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
    irqreturn_t ret = IRQ_NONE;

    spin_lock(&sport->port.lock);

    /* One coherent snapshot of status and irq-enable registers. */
    usr1 = imx_uart_readl(sport, USR1);
    usr2 = imx_uart_readl(sport, USR2);
    ucr1 = imx_uart_readl(sport, UCR1);
    ucr2 = imx_uart_readl(sport, UCR2);
    ucr3 = imx_uart_readl(sport, UCR3);
    ucr4 = imx_uart_readl(sport, UCR4);

    /*
     * Even if a condition is true that can trigger an irq only handle it if
     * the respective irq source is enabled. This prevents some undesired
     * actions, for example if a character that sits in the RX FIFO and that
     * should be fetched via DMA is tried to be fetched using PIO. Or the
     * receiver is currently off and so reading from URXD0 results in an
     * exception. So just mask the (raw) status bits for disabled irqs.
     */
    if ((ucr1 & UCR1_RRDYEN) == 0)
        usr1 &= ~USR1_RRDY;
    if ((ucr2 & UCR2_ATEN) == 0)
        usr1 &= ~USR1_AGTIM;
    if ((ucr1 & UCR1_TRDYEN) == 0)
        usr1 &= ~USR1_TRDY;
    if ((ucr4 & UCR4_TCEN) == 0)
        usr2 &= ~USR2_TXDC;
    if ((ucr3 & UCR3_DTRDEN) == 0)
        usr1 &= ~USR1_DTRD;
    if ((ucr1 & UCR1_RTSDEN) == 0)
        usr1 &= ~USR1_RTSD;
    if ((ucr3 & UCR3_AWAKEN) == 0)
        usr1 &= ~USR1_AWAKE;
    if ((ucr4 & UCR4_OREN) == 0)
        usr2 &= ~USR2_ORE;

    /* RX data ready or aging timer fired: drain the FIFO via PIO. */
    if (usr1 & (USR1_RRDY | USR1_AGTIM)) {
        imx_uart_writel(sport, USR1_AGTIM, USR1);

        __imx_uart_rxint(irq, dev_id);
        ret = IRQ_HANDLED;
    }

    /* TX FIFO has room, or transmission completed: push more data. */
    if ((usr1 & USR1_TRDY) || (usr2 & USR2_TXDC)) {
        imx_uart_transmit_buffer(sport);
        ret = IRQ_HANDLED;
    }

    /* DTR/DSR edge: re-evaluate modem status lines. */
    if (usr1 & USR1_DTRD) {
        imx_uart_writel(sport, USR1_DTRD, USR1);

        imx_uart_mctrl_check(sport);

        ret = IRQ_HANDLED;
    }

    /* RTS edge. */
    if (usr1 & USR1_RTSD) {
        __imx_uart_rtsint(irq, dev_id);
        ret = IRQ_HANDLED;
    }

    /* Asynchronous wake: just acknowledge it. */
    if (usr1 & USR1_AWAKE) {
        imx_uart_writel(sport, USR1_AWAKE, USR1);
        ret = IRQ_HANDLED;
    }

    /* RX overrun: count it and clear the (write-1-to-clear) flag. */
    if (usr2 & USR2_ORE) {
        sport->port.icount.overrun++;
        imx_uart_writel(sport, USR2_ORE, USR2);
        ret = IRQ_HANDLED;
    }

    spin_unlock(&sport->port.lock);

    return ret;
}
1011 
1012 /*
1013  * Return TIOCSER_TEMT when transmitter is not busy.
1014  */
1015 static unsigned int imx_uart_tx_empty(struct uart_port *port)
1016 {
1017     struct imx_port *sport = (struct imx_port *)port;
1018     unsigned int ret;
1019 
1020     ret = (imx_uart_readl(sport, USR2) & USR2_TXDC) ?  TIOCSER_TEMT : 0;
1021 
1022     /* If the TX DMA is working, return 0. */
1023     if (sport->dma_is_txing)
1024         ret = 0;
1025 
1026     return ret;
1027 }
1028 
1029 /* called with port.lock taken and irqs off */
1030 static unsigned int imx_uart_get_mctrl(struct uart_port *port)
1031 {
1032     struct imx_port *sport = (struct imx_port *)port;
1033     unsigned int ret = imx_uart_get_hwmctrl(sport);
1034 
1035     mctrl_gpio_get(sport->gpios, &ret);
1036 
1037     return ret;
1038 }
1039 
1040 /* called with port.lock taken and irqs off */
1041 static void imx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
1042 {
1043     struct imx_port *sport = (struct imx_port *)port;
1044     u32 ucr3, uts;
1045 
1046     if (!(port->rs485.flags & SER_RS485_ENABLED)) {
1047         u32 ucr2;
1048 
1049         /*
1050          * Turn off autoRTS if RTS is lowered and restore autoRTS
1051          * setting if RTS is raised.
1052          */
1053         ucr2 = imx_uart_readl(sport, UCR2);
1054         ucr2 &= ~(UCR2_CTS | UCR2_CTSC);
1055         if (mctrl & TIOCM_RTS) {
1056             ucr2 |= UCR2_CTS;
1057             /*
1058              * UCR2_IRTS is unset if and only if the port is
1059              * configured for CRTSCTS, so we use inverted UCR2_IRTS
1060              * to get the state to restore to.
1061              */
1062             if (!(ucr2 & UCR2_IRTS))
1063                 ucr2 |= UCR2_CTSC;
1064         }
1065         imx_uart_writel(sport, ucr2, UCR2);
1066     }
1067 
1068     ucr3 = imx_uart_readl(sport, UCR3) & ~UCR3_DSR;
1069     if (!(mctrl & TIOCM_DTR))
1070         ucr3 |= UCR3_DSR;
1071     imx_uart_writel(sport, ucr3, UCR3);
1072 
1073     uts = imx_uart_readl(sport, imx_uart_uts_reg(sport)) & ~UTS_LOOP;
1074     if (mctrl & TIOCM_LOOP)
1075         uts |= UTS_LOOP;
1076     imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
1077 
1078     mctrl_gpio_set(sport->gpios, mctrl);
1079 }
1080 
1081 /*
1082  * Interrupts always disabled.
1083  */
1084 static void imx_uart_break_ctl(struct uart_port *port, int break_state)
1085 {
1086     struct imx_port *sport = (struct imx_port *)port;
1087     unsigned long flags;
1088     u32 ucr1;
1089 
1090     spin_lock_irqsave(&sport->port.lock, flags);
1091 
1092     ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_SNDBRK;
1093 
1094     if (break_state != 0)
1095         ucr1 |= UCR1_SNDBRK;
1096 
1097     imx_uart_writel(sport, ucr1, UCR1);
1098 
1099     spin_unlock_irqrestore(&sport->port.lock, flags);
1100 }
1101 
1102 /*
1103  * This is our per-port timeout handler, for checking the
1104  * modem status signals.
1105  */
1106 static void imx_uart_timeout(struct timer_list *t)
1107 {
1108     struct imx_port *sport = from_timer(sport, t, timer);
1109     unsigned long flags;
1110 
1111     if (sport->port.state) {
1112         spin_lock_irqsave(&sport->port.lock, flags);
1113         imx_uart_mctrl_check(sport);
1114         spin_unlock_irqrestore(&sport->port.lock, flags);
1115 
1116         mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
1117     }
1118 }
1119 
1120 /*
1121  * There are two kinds of RX DMA interrupts(such as in the MX6Q):
1122  *   [1] the RX DMA buffer is full.
1123  *   [2] the aging timer expires
1124  *
1125  * Condition [2] is triggered when a character has been sitting in the FIFO
1126  * for at least 8 byte durations.
1127  */
1128 static void imx_uart_dma_rx_callback(void *data)
1129 {
1130     struct imx_port *sport = data;
1131     struct dma_chan *chan = sport->dma_chan_rx;
1132     struct scatterlist *sgl = &sport->rx_sgl;
1133     struct tty_port *port = &sport->port.state->port;
1134     struct dma_tx_state state;
1135     struct circ_buf *rx_ring = &sport->rx_ring;
1136     enum dma_status status;
1137     unsigned int w_bytes = 0;
1138     unsigned int r_bytes;
1139     unsigned int bd_size;
1140 
1141     status = dmaengine_tx_status(chan, sport->rx_cookie, &state);
1142 
1143     if (status == DMA_ERROR) {
1144         imx_uart_clear_rx_errors(sport);
1145         return;
1146     }
1147 
1148     if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
1149 
1150         /*
1151          * The state-residue variable represents the empty space
1152          * relative to the entire buffer. Taking this in consideration
1153          * the head is always calculated base on the buffer total
1154          * length - DMA transaction residue. The UART script from the
1155          * SDMA firmware will jump to the next buffer descriptor,
1156          * once a DMA transaction if finalized (IMX53 RM - A.4.1.2.4).
1157          * Taking this in consideration the tail is always at the
1158          * beginning of the buffer descriptor that contains the head.
1159          */
1160 
1161         /* Calculate the head */
1162         rx_ring->head = sg_dma_len(sgl) - state.residue;
1163 
1164         /* Calculate the tail. */
1165         bd_size = sg_dma_len(sgl) / sport->rx_periods;
1166         rx_ring->tail = ((rx_ring->head-1) / bd_size) * bd_size;
1167 
1168         if (rx_ring->head <= sg_dma_len(sgl) &&
1169             rx_ring->head > rx_ring->tail) {
1170 
1171             /* Move data from tail to head */
1172             r_bytes = rx_ring->head - rx_ring->tail;
1173 
1174             /* CPU claims ownership of RX DMA buffer */
1175             dma_sync_sg_for_cpu(sport->port.dev, sgl, 1,
1176                 DMA_FROM_DEVICE);
1177 
1178             w_bytes = tty_insert_flip_string(port,
1179                 sport->rx_buf + rx_ring->tail, r_bytes);
1180 
1181             /* UART retrieves ownership of RX DMA buffer */
1182             dma_sync_sg_for_device(sport->port.dev, sgl, 1,
1183                 DMA_FROM_DEVICE);
1184 
1185             if (w_bytes != r_bytes)
1186                 sport->port.icount.buf_overrun++;
1187 
1188             sport->port.icount.rx += w_bytes;
1189         } else  {
1190             WARN_ON(rx_ring->head > sg_dma_len(sgl));
1191             WARN_ON(rx_ring->head <= rx_ring->tail);
1192         }
1193     }
1194 
1195     if (w_bytes) {
1196         tty_flip_buffer_push(port);
1197         dev_dbg(sport->port.dev, "We get %d bytes.\n", w_bytes);
1198     }
1199 }
1200 
1201 static int imx_uart_start_rx_dma(struct imx_port *sport)
1202 {
1203     struct scatterlist *sgl = &sport->rx_sgl;
1204     struct dma_chan *chan = sport->dma_chan_rx;
1205     struct device *dev = sport->port.dev;
1206     struct dma_async_tx_descriptor *desc;
1207     int ret;
1208 
1209     sport->rx_ring.head = 0;
1210     sport->rx_ring.tail = 0;
1211 
1212     sg_init_one(sgl, sport->rx_buf, sport->rx_buf_size);
1213     ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
1214     if (ret == 0) {
1215         dev_err(dev, "DMA mapping error for RX.\n");
1216         return -EINVAL;
1217     }
1218 
1219     desc = dmaengine_prep_dma_cyclic(chan, sg_dma_address(sgl),
1220         sg_dma_len(sgl), sg_dma_len(sgl) / sport->rx_periods,
1221         DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
1222 
1223     if (!desc) {
1224         dma_unmap_sg(dev, sgl, 1, DMA_FROM_DEVICE);
1225         dev_err(dev, "We cannot prepare for the RX slave dma!\n");
1226         return -EINVAL;
1227     }
1228     desc->callback = imx_uart_dma_rx_callback;
1229     desc->callback_param = sport;
1230 
1231     dev_dbg(dev, "RX: prepare for the DMA.\n");
1232     sport->dma_is_rxing = 1;
1233     sport->rx_cookie = dmaengine_submit(desc);
1234     dma_async_issue_pending(chan);
1235     return 0;
1236 }
1237 
1238 static void imx_uart_clear_rx_errors(struct imx_port *sport)
1239 {
1240     struct tty_port *port = &sport->port.state->port;
1241     u32 usr1, usr2;
1242 
1243     usr1 = imx_uart_readl(sport, USR1);
1244     usr2 = imx_uart_readl(sport, USR2);
1245 
1246     if (usr2 & USR2_BRCD) {
1247         sport->port.icount.brk++;
1248         imx_uart_writel(sport, USR2_BRCD, USR2);
1249         uart_handle_break(&sport->port);
1250         if (tty_insert_flip_char(port, 0, TTY_BREAK) == 0)
1251             sport->port.icount.buf_overrun++;
1252         tty_flip_buffer_push(port);
1253     } else {
1254         if (usr1 & USR1_FRAMERR) {
1255             sport->port.icount.frame++;
1256             imx_uart_writel(sport, USR1_FRAMERR, USR1);
1257         } else if (usr1 & USR1_PARITYERR) {
1258             sport->port.icount.parity++;
1259             imx_uart_writel(sport, USR1_PARITYERR, USR1);
1260         }
1261     }
1262 
1263     if (usr2 & USR2_ORE) {
1264         sport->port.icount.overrun++;
1265         imx_uart_writel(sport, USR2_ORE, USR2);
1266     }
1267 
1268 }
1269 
1270 #define TXTL_DEFAULT 2 /* reset default */
1271 #define RXTL_DEFAULT 8 /* 8 characters or aging timer */
1272 #define TXTL_DMA 8 /* DMA burst setting */
1273 #define RXTL_DMA 9 /* DMA burst setting */
1274 
1275 static void imx_uart_setup_ufcr(struct imx_port *sport,
1276                 unsigned char txwl, unsigned char rxwl)
1277 {
1278     unsigned int val;
1279 
1280     /* set receiver / transmitter trigger level */
1281     val = imx_uart_readl(sport, UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
1282     val |= txwl << UFCR_TXTL_SHF | rxwl;
1283     imx_uart_writel(sport, val, UFCR);
1284 }
1285 
1286 static void imx_uart_dma_exit(struct imx_port *sport)
1287 {
1288     if (sport->dma_chan_rx) {
1289         dmaengine_terminate_sync(sport->dma_chan_rx);
1290         dma_release_channel(sport->dma_chan_rx);
1291         sport->dma_chan_rx = NULL;
1292         sport->rx_cookie = -EINVAL;
1293         kfree(sport->rx_buf);
1294         sport->rx_buf = NULL;
1295     }
1296 
1297     if (sport->dma_chan_tx) {
1298         dmaengine_terminate_sync(sport->dma_chan_tx);
1299         dma_release_channel(sport->dma_chan_tx);
1300         sport->dma_chan_tx = NULL;
1301     }
1302 }
1303 
/*
 * Acquire and configure the RX and TX DMA channels and allocate the RX
 * ring buffer. On any failure, the shared err label releases whatever
 * was already acquired via imx_uart_dma_exit(). Returns 0 or -errno.
 */
static int imx_uart_dma_init(struct imx_port *sport)
{
    struct dma_slave_config slave_config = {};
    struct device *dev = sport->port.dev;
    int ret;

    /* Prepare for RX : */
    sport->dma_chan_rx = dma_request_slave_channel(dev, "rx");
    if (!sport->dma_chan_rx) {
        dev_dbg(dev, "cannot get the DMA channel.\n");
        ret = -EINVAL;
        goto err;
    }

    slave_config.direction = DMA_DEV_TO_MEM;
    slave_config.src_addr = sport->port.mapbase + URXD0;
    slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
    /* one byte less than the watermark level to enable the aging timer */
    slave_config.src_maxburst = RXTL_DMA - 1;
    ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config);
    if (ret) {
        dev_err(dev, "error in RX dma configuration.\n");
        goto err;
    }

    /* RX ring buffer: rx_periods descriptors of rx_period_length each. */
    sport->rx_buf_size = sport->rx_period_length * sport->rx_periods;
    sport->rx_buf = kzalloc(sport->rx_buf_size, GFP_KERNEL);
    if (!sport->rx_buf) {
        ret = -ENOMEM;
        goto err;
    }
    sport->rx_ring.buf = sport->rx_buf;

    /* Prepare for TX : */
    sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
    if (!sport->dma_chan_tx) {
        dev_err(dev, "cannot get the TX DMA channel!\n");
        ret = -EINVAL;
        goto err;
    }

    /* slave_config is reused; direction/dst fields overwrite the RX setup. */
    slave_config.direction = DMA_MEM_TO_DEV;
    slave_config.dst_addr = sport->port.mapbase + URTX0;
    slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
    slave_config.dst_maxburst = TXTL_DMA;
    ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config);
    if (ret) {
        dev_err(dev, "error in TX dma configuration.");
        goto err;
    }

    return 0;
err:
    imx_uart_dma_exit(sport);
    return ret;
}
1360 
1361 static void imx_uart_enable_dma(struct imx_port *sport)
1362 {
1363     u32 ucr1;
1364 
1365     imx_uart_setup_ufcr(sport, TXTL_DMA, RXTL_DMA);
1366 
1367     /* set UCR1 */
1368     ucr1 = imx_uart_readl(sport, UCR1);
1369     ucr1 |= UCR1_RXDMAEN | UCR1_TXDMAEN | UCR1_ATDMAEN;
1370     imx_uart_writel(sport, ucr1, UCR1);
1371 
1372     sport->dma_is_enabled = 1;
1373 }
1374 
1375 static void imx_uart_disable_dma(struct imx_port *sport)
1376 {
1377     u32 ucr1;
1378 
1379     /* clear UCR1 */
1380     ucr1 = imx_uart_readl(sport, UCR1);
1381     ucr1 &= ~(UCR1_RXDMAEN | UCR1_TXDMAEN | UCR1_ATDMAEN);
1382     imx_uart_writel(sport, ucr1, UCR1);
1383 
1384     imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
1385 
1386     sport->dma_is_enabled = 0;
1387 }
1388 
1389 /* half the RX buffer size */
1390 #define CTSTL 16
1391 
/*
 * uart_ops.startup: bring the port up when it is opened. Enables clocks,
 * resets the FIFOs/state machines, optionally sets up DMA, and enables
 * the receiver, transmitter and interrupt sources in a careful order.
 * Returns 0 on success or a clk enable error code.
 */
static int imx_uart_startup(struct uart_port *port)
{
    struct imx_port *sport = (struct imx_port *)port;
    int retval, i;
    unsigned long flags;
    int dma_is_inited = 0;
    u32 ucr1, ucr2, ucr3, ucr4;

    retval = clk_prepare_enable(sport->clk_per);
    if (retval)
        return retval;
    retval = clk_prepare_enable(sport->clk_ipg);
    if (retval) {
        clk_disable_unprepare(sport->clk_per);
        return retval;
    }

    imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);

    /* disable the DREN bit (Data Ready interrupt enable) before
     * requesting IRQs
     */
    ucr4 = imx_uart_readl(sport, UCR4);

    /* set the trigger level for CTS */
    ucr4 &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
    ucr4 |= CTSTL << UCR4_CTSTL_SHF;

    imx_uart_writel(sport, ucr4 & ~UCR4_DREN, UCR4);

    /* Can we enable the DMA support? */
    if (!uart_console(port) && imx_uart_dma_init(sport) == 0)
        dma_is_inited = 1;

    spin_lock_irqsave(&sport->port.lock, flags);
    /* Reset fifo's and state machines */
    i = 100;

    ucr2 = imx_uart_readl(sport, UCR2);
    ucr2 &= ~UCR2_SRST;
    imx_uart_writel(sport, ucr2, UCR2);

    /* Wait (bounded to ~100us) for the softreset to complete. */
    while (!(imx_uart_readl(sport, UCR2) & UCR2_SRST) && (--i > 0))
        udelay(1);

    /*
     * Finally, clear and enable interrupts
     */
    imx_uart_writel(sport, USR1_RTSD | USR1_DTRD, USR1);
    imx_uart_writel(sport, USR2_ORE, USR2);

    /* RRDYEN stays off here; it is enabled below only in the PIO case. */
    ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_RRDYEN;
    ucr1 |= UCR1_UARTEN;
    if (sport->have_rtscts)
        ucr1 |= UCR1_RTSDEN;

    imx_uart_writel(sport, ucr1, UCR1);

    /* Overrun irq only without DMA; optional RX polarity inversion. */
    ucr4 = imx_uart_readl(sport, UCR4) & ~(UCR4_OREN | UCR4_INVR);
    if (!dma_is_inited)
        ucr4 |= UCR4_OREN;
    if (sport->inverted_rx)
        ucr4 |= UCR4_INVR;
    imx_uart_writel(sport, ucr4, UCR4);

    ucr3 = imx_uart_readl(sport, UCR3) & ~UCR3_INVT;
    /*
     * configure tx polarity before enabling tx
     */
    if (sport->inverted_tx)
        ucr3 |= UCR3_INVT;

    if (!imx_uart_is_imx1(sport)) {
        ucr3 |= UCR3_DTRDEN | UCR3_RI | UCR3_DCD;

        if (sport->dte_mode)
            /* disable broken interrupts */
            ucr3 &= ~(UCR3_RI | UCR3_DCD);
    }
    imx_uart_writel(sport, ucr3, UCR3);

    ucr2 = imx_uart_readl(sport, UCR2) & ~UCR2_ATEN;
    ucr2 |= (UCR2_RXEN | UCR2_TXEN);
    if (!sport->have_rtscts)
        ucr2 |= UCR2_IRTS;
    /*
     * make sure the edge sensitive RTS-irq is disabled,
     * we're using RTSD instead.
     */
    if (!imx_uart_is_imx1(sport))
        ucr2 &= ~UCR2_RTSEN;
    imx_uart_writel(sport, ucr2, UCR2);

    /*
     * Enable modem status interrupts
     */
    imx_uart_enable_ms(&sport->port);

    if (dma_is_inited) {
        imx_uart_enable_dma(sport);
        imx_uart_start_rx_dma(sport);
    } else {
        /* PIO mode: RX-ready and aging-timer irqs drive reception. */
        ucr1 = imx_uart_readl(sport, UCR1);
        ucr1 |= UCR1_RRDYEN;
        imx_uart_writel(sport, ucr1, UCR1);

        ucr2 = imx_uart_readl(sport, UCR2);
        ucr2 |= UCR2_ATEN;
        imx_uart_writel(sport, ucr2, UCR2);
    }

    spin_unlock_irqrestore(&sport->port.lock, flags);

    return 0;
}
1507 
/*
 * uart_ops.shutdown: counterpart of imx_uart_startup(). Tears down DMA
 * (if active), stops the modem-status timer, disables the port and its
 * interrupt sources, and releases the clocks.
 */
static void imx_uart_shutdown(struct uart_port *port)
{
    struct imx_port *sport = (struct imx_port *)port;
    unsigned long flags;
    u32 ucr1, ucr2, ucr4;

    if (sport->dma_is_enabled) {
        /* Terminate transfers before unmapping their buffers. */
        dmaengine_terminate_sync(sport->dma_chan_tx);
        if (sport->dma_is_txing) {
            dma_unmap_sg(sport->port.dev, &sport->tx_sgl[0],
                     sport->dma_tx_nents, DMA_TO_DEVICE);
            sport->dma_is_txing = 0;
        }
        dmaengine_terminate_sync(sport->dma_chan_rx);
        if (sport->dma_is_rxing) {
            dma_unmap_sg(sport->port.dev, &sport->rx_sgl,
                     1, DMA_FROM_DEVICE);
            sport->dma_is_rxing = 0;
        }

        spin_lock_irqsave(&sport->port.lock, flags);
        imx_uart_stop_tx(port);
        imx_uart_stop_rx(port);
        imx_uart_disable_dma(sport);
        spin_unlock_irqrestore(&sport->port.lock, flags);
        /* Channel release must happen outside the spinlock (may sleep). */
        imx_uart_dma_exit(sport);
    }

    mctrl_gpio_disable_ms(sport->gpios);

    spin_lock_irqsave(&sport->port.lock, flags);
    ucr2 = imx_uart_readl(sport, UCR2);
    ucr2 &= ~(UCR2_TXEN | UCR2_ATEN);
    imx_uart_writel(sport, ucr2, UCR2);
    spin_unlock_irqrestore(&sport->port.lock, flags);

    /*
     * Stop our timer.
     */
    del_timer_sync(&sport->timer);

    /*
     * Disable all interrupts, port and break condition.
     */

    spin_lock_irqsave(&sport->port.lock, flags);

    ucr1 = imx_uart_readl(sport, UCR1);
    ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN | UCR1_RXDMAEN | UCR1_ATDMAEN);
    imx_uart_writel(sport, ucr1, UCR1);

    ucr4 = imx_uart_readl(sport, UCR4);
    ucr4 &= ~UCR4_TCEN;
    imx_uart_writel(sport, ucr4, UCR4);

    spin_unlock_irqrestore(&sport->port.lock, flags);

    clk_disable_unprepare(sport->clk_per);
    clk_disable_unprepare(sport->clk_ipg);
}
1568 
/* called with port.lock taken and irqs off */
/*
 * uart_ops.flush_buffer: discard pending TX data. Only does real work
 * when TX DMA is in use; aborts the transfer and soft-resets the UART,
 * preserving the baud-rate and test registers across the reset.
 */
static void imx_uart_flush_buffer(struct uart_port *port)
{
    struct imx_port *sport = (struct imx_port *)port;
    struct scatterlist *sgl = &sport->tx_sgl[0];
    u32 ucr2;
    int i = 100, ubir, ubmr, uts;

    if (!sport->dma_chan_tx)
        return;

    sport->tx_bytes = 0;
    dmaengine_terminate_all(sport->dma_chan_tx);
    if (sport->dma_is_txing) {
        u32 ucr1;

        /* Unmap the aborted transfer and stop issuing TX DMA requests. */
        dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents,
                 DMA_TO_DEVICE);
        ucr1 = imx_uart_readl(sport, UCR1);
        ucr1 &= ~UCR1_TXDMAEN;
        imx_uart_writel(sport, ucr1, UCR1);
        sport->dma_is_txing = 0;
    }

    /*
     * According to the Reference Manual description of the UART SRST bit:
     *
     * "Reset the transmit and receive state machines,
     * all FIFOs and register USR1, USR2, UBIR, UBMR, UBRC, URXD, UTXD
     * and UTS[6-3]".
     *
     * We don't need to restore the old values from USR1, USR2, URXD and
     * UTXD. UBRC is read only, so only save/restore the other three
     * registers.
     */
    ubir = imx_uart_readl(sport, UBIR);
    ubmr = imx_uart_readl(sport, UBMR);
    uts = imx_uart_readl(sport, IMX21_UTS);

    /* Trigger the softreset by clearing SRST... */
    ucr2 = imx_uart_readl(sport, UCR2);
    ucr2 &= ~UCR2_SRST;
    imx_uart_writel(sport, ucr2, UCR2);

    /* ...and wait (bounded to ~100us) for it to complete. */
    while (!(imx_uart_readl(sport, UCR2) & UCR2_SRST) && (--i > 0))
        udelay(1);

    /* Restore the registers */
    imx_uart_writel(sport, ubir, UBIR);
    imx_uart_writel(sport, ubmr, UBMR);
    imx_uart_writel(sport, uts, IMX21_UTS);
}
1620 
/*
 * uart_ops.set_termios: apply a new line discipline configuration
 * (character size, stop bits, parity, flow control) and program the
 * baud-rate generator. The divider math uses the rational best
 * approximation of uartclk against 16*div*baud for the UBIR/UBMR pair.
 */
static void
imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
             struct ktermios *old)
{
    struct imx_port *sport = (struct imx_port *)port;
    unsigned long flags;
    u32 ucr2, old_ucr2, ufcr;
    unsigned int baud, quot;
    unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
    unsigned long div;
    unsigned long num, denom, old_ubir, old_ubmr;
    uint64_t tdiv64;

    /*
     * We only support CS7 and CS8.
     */
    while ((termios->c_cflag & CSIZE) != CS7 &&
           (termios->c_cflag & CSIZE) != CS8) {
        termios->c_cflag &= ~CSIZE;
        termios->c_cflag |= old_csize;
        old_csize = CS8;
    }

    /* Stop the modem-status poll timer while we reconfigure. */
    del_timer_sync(&sport->timer);

    /*
     * Ask the core to calculate the divisor for us.
     */
    baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
    quot = uart_get_divisor(port, baud);

    spin_lock_irqsave(&sport->port.lock, flags);

    /*
     * Read current UCR2 and save it for future use, then clear all the bits
     * except those we will or may need to preserve.
     */
    old_ucr2 = imx_uart_readl(sport, UCR2);
    ucr2 = old_ucr2 & (UCR2_TXEN | UCR2_RXEN | UCR2_ATEN | UCR2_CTS);

    ucr2 |= UCR2_SRST | UCR2_IRTS;
    if ((termios->c_cflag & CSIZE) == CS8)
        ucr2 |= UCR2_WS;

    if (!sport->have_rtscts)
        termios->c_cflag &= ~CRTSCTS;

    if (port->rs485.flags & SER_RS485_ENABLED) {
        /*
         * RTS is mandatory for rs485 operation, so keep
         * it under manual control and keep transmitter
         * disabled.
         */
        if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
            imx_uart_rts_active(sport, &ucr2);
        else
            imx_uart_rts_inactive(sport, &ucr2);

    } else if (termios->c_cflag & CRTSCTS) {
        /*
         * Only let receiver control RTS output if we were not requested
         * to have RTS inactive (which then should take precedence).
         */
        if (ucr2 & UCR2_CTS)
            ucr2 |= UCR2_CTSC;
    }

    if (termios->c_cflag & CRTSCTS)
        ucr2 &= ~UCR2_IRTS;
    if (termios->c_cflag & CSTOPB)
        ucr2 |= UCR2_STPB;
    if (termios->c_cflag & PARENB) {
        ucr2 |= UCR2_PREN;
        if (termios->c_cflag & PARODD)
            ucr2 |= UCR2_PROE;
    }

    /* Error bits callers want reported to the tty layer. */
    sport->port.read_status_mask = 0;
    if (termios->c_iflag & INPCK)
        sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
    if (termios->c_iflag & (BRKINT | PARMRK))
        sport->port.read_status_mask |= URXD_BRK;

    /*
     * Characters to ignore
     */
    sport->port.ignore_status_mask = 0;
    if (termios->c_iflag & IGNPAR)
        sport->port.ignore_status_mask |= URXD_PRERR | URXD_FRMERR;
    if (termios->c_iflag & IGNBRK) {
        sport->port.ignore_status_mask |= URXD_BRK;
        /*
         * If we're ignoring parity and break indicators,
         * ignore overruns too (for real raw support).
         */
        if (termios->c_iflag & IGNPAR)
            sport->port.ignore_status_mask |= URXD_OVRRUN;
    }

    /* CREAD off: receive but throw everything away (dummy read). */
    if ((termios->c_cflag & CREAD) == 0)
        sport->port.ignore_status_mask |= URXD_DUMMY_READ;

    /*
     * Update the per-port timeout.
     */
    uart_update_timeout(port, termios->c_cflag, baud);

    /* custom-baudrate handling */
    div = sport->port.uartclk / (baud * 16);
    if (baud == 38400 && quot != div)
        baud = sport->port.uartclk / (quot * 16);

    /* Reference divider is a 3-bit field: clamp to [1, 7]. */
    div = sport->port.uartclk / (baud * 16);
    if (div > 7)
        div = 7;
    if (!div)
        div = 1;

    rational_best_approximation(16 * div * baud, sport->port.uartclk,
        1 << 16, 1 << 16, &num, &denom);

    /* Report back the baud rate actually achieved with num/denom. */
    tdiv64 = sport->port.uartclk;
    tdiv64 *= num;
    do_div(tdiv64, denom * 16 * div);
    tty_termios_encode_baud_rate(termios,
                (speed_t)tdiv64, (speed_t)tdiv64);

    /* UBIR/UBMR hold (value - 1). */
    num -= 1;
    denom -= 1;

    ufcr = imx_uart_readl(sport, UFCR);
    ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
    imx_uart_writel(sport, ufcr, UFCR);

    /*
     *  Two registers below should always be written both and in this
     *  particular order. One consequence is that we need to check if any of
     *  them changes and then update both. We do need the check for change
     *  as even writing the same values seem to "restart"
     *  transmission/receiving logic in the hardware, that leads to data
     *  breakage even when rate doesn't in fact change. E.g., user switches
     *  RTS/CTS handshake and suddenly gets broken bytes.
     */
    old_ubir = imx_uart_readl(sport, UBIR);
    old_ubmr = imx_uart_readl(sport, UBMR);
    if (old_ubir != num || old_ubmr != denom) {
        imx_uart_writel(sport, num, UBIR);
        imx_uart_writel(sport, denom, UBMR);
    }

    if (!imx_uart_is_imx1(sport))
        imx_uart_writel(sport, sport->port.uartclk / div / 1000,
                IMX21_ONEMS);

    imx_uart_writel(sport, ucr2, UCR2);

    if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
        imx_uart_enable_ms(&sport->port);

    spin_unlock_irqrestore(&sport->port.lock, flags);
}
1782 
1783 static const char *imx_uart_type(struct uart_port *port)
1784 {
1785     struct imx_port *sport = (struct imx_port *)port;
1786 
1787     return sport->port.type == PORT_IMX ? "IMX" : NULL;
1788 }
1789 
1790 /*
1791  * Configure/autoconfigure the port.
1792  */
1793 static void imx_uart_config_port(struct uart_port *port, int flags)
1794 {
1795     struct imx_port *sport = (struct imx_port *)port;
1796 
1797     if (flags & UART_CONFIG_TYPE)
1798         sport->port.type = PORT_IMX;
1799 }
1800 
1801 /*
1802  * Verify the new serial_struct (for TIOCSSERIAL).
1803  * The only change we allow are to the flags and type, and
1804  * even then only between PORT_IMX and PORT_UNKNOWN
1805  */
1806 static int
1807 imx_uart_verify_port(struct uart_port *port, struct serial_struct *ser)
1808 {
1809     struct imx_port *sport = (struct imx_port *)port;
1810     int ret = 0;
1811 
1812     if (ser->type != PORT_UNKNOWN && ser->type != PORT_IMX)
1813         ret = -EINVAL;
1814     if (sport->port.irq != ser->irq)
1815         ret = -EINVAL;
1816     if (ser->io_type != UPIO_MEM)
1817         ret = -EINVAL;
1818     if (sport->port.uartclk / 16 != ser->baud_base)
1819         ret = -EINVAL;
1820     if (sport->port.mapbase != (unsigned long)ser->iomem_base)
1821         ret = -EINVAL;
1822     if (sport->port.iobase != ser->port)
1823         ret = -EINVAL;
1824     if (ser->hub6 != 0)
1825         ret = -EINVAL;
1826     return ret;
1827 }
1828 
1829 #if defined(CONFIG_CONSOLE_POLL)
1830 
1831 static int imx_uart_poll_init(struct uart_port *port)
1832 {
1833     struct imx_port *sport = (struct imx_port *)port;
1834     unsigned long flags;
1835     u32 ucr1, ucr2;
1836     int retval;
1837 
1838     retval = clk_prepare_enable(sport->clk_ipg);
1839     if (retval)
1840         return retval;
1841     retval = clk_prepare_enable(sport->clk_per);
1842     if (retval)
1843         clk_disable_unprepare(sport->clk_ipg);
1844 
1845     imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
1846 
1847     spin_lock_irqsave(&sport->port.lock, flags);
1848 
1849     /*
1850      * Be careful about the order of enabling bits here. First enable the
1851      * receiver (UARTEN + RXEN) and only then the corresponding irqs.
1852      * This prevents that a character that already sits in the RX fifo is
1853      * triggering an irq but the try to fetch it from there results in an
1854      * exception because UARTEN or RXEN is still off.
1855      */
1856     ucr1 = imx_uart_readl(sport, UCR1);
1857     ucr2 = imx_uart_readl(sport, UCR2);
1858 
1859     if (imx_uart_is_imx1(sport))
1860         ucr1 |= IMX1_UCR1_UARTCLKEN;
1861 
1862     ucr1 |= UCR1_UARTEN;
1863     ucr1 &= ~(UCR1_TRDYEN | UCR1_RTSDEN | UCR1_RRDYEN);
1864 
1865     ucr2 |= UCR2_RXEN | UCR2_TXEN;
1866     ucr2 &= ~UCR2_ATEN;
1867 
1868     imx_uart_writel(sport, ucr1, UCR1);
1869     imx_uart_writel(sport, ucr2, UCR2);
1870 
1871     /* now enable irqs */
1872     imx_uart_writel(sport, ucr1 | UCR1_RRDYEN, UCR1);
1873     imx_uart_writel(sport, ucr2 | UCR2_ATEN, UCR2);
1874 
1875     spin_unlock_irqrestore(&sport->port.lock, flags);
1876 
1877     return 0;
1878 }
1879 
1880 static int imx_uart_poll_get_char(struct uart_port *port)
1881 {
1882     struct imx_port *sport = (struct imx_port *)port;
1883     if (!(imx_uart_readl(sport, USR2) & USR2_RDR))
1884         return NO_POLL_CHAR;
1885 
1886     return imx_uart_readl(sport, URXD0) & URXD_RX_DATA;
1887 }
1888 
1889 static void imx_uart_poll_put_char(struct uart_port *port, unsigned char c)
1890 {
1891     struct imx_port *sport = (struct imx_port *)port;
1892     unsigned int status;
1893 
1894     /* drain */
1895     do {
1896         status = imx_uart_readl(sport, USR1);
1897     } while (~status & USR1_TRDY);
1898 
1899     /* write */
1900     imx_uart_writel(sport, c, URTX0);
1901 
1902     /* flush */
1903     do {
1904         status = imx_uart_readl(sport, USR2);
1905     } while (~status & USR2_TXDC);
1906 }
1907 #endif
1908 
1909 /* called with port.lock taken and irqs off or from .probe without locking */
1910 static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termios,
1911                  struct serial_rs485 *rs485conf)
1912 {
1913     struct imx_port *sport = (struct imx_port *)port;
1914     u32 ucr2;
1915 
1916     if (rs485conf->flags & SER_RS485_ENABLED) {
1917         /* Enable receiver if low-active RTS signal is requested */
1918         if (sport->have_rtscts &&  !sport->have_rtsgpio &&
1919             !(rs485conf->flags & SER_RS485_RTS_ON_SEND))
1920             rs485conf->flags |= SER_RS485_RX_DURING_TX;
1921 
1922         /* disable transmitter */
1923         ucr2 = imx_uart_readl(sport, UCR2);
1924         if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
1925             imx_uart_rts_active(sport, &ucr2);
1926         else
1927             imx_uart_rts_inactive(sport, &ucr2);
1928         imx_uart_writel(sport, ucr2, UCR2);
1929     }
1930 
1931     /* Make sure Rx is enabled in case Tx is active with Rx disabled */
1932     if (!(rs485conf->flags & SER_RS485_ENABLED) ||
1933         rs485conf->flags & SER_RS485_RX_DURING_TX)
1934         imx_uart_start_rx(port);
1935 
1936     return 0;
1937 }
1938 
/* uart_ops for all i.MX UART ports; poll hooks only with CONFIG_CONSOLE_POLL. */
static const struct uart_ops imx_uart_pops = {
    .tx_empty   = imx_uart_tx_empty,
    .set_mctrl  = imx_uart_set_mctrl,
    .get_mctrl  = imx_uart_get_mctrl,
    .stop_tx    = imx_uart_stop_tx,
    .start_tx   = imx_uart_start_tx,
    .stop_rx    = imx_uart_stop_rx,
    .enable_ms  = imx_uart_enable_ms,
    .break_ctl  = imx_uart_break_ctl,
    .startup    = imx_uart_startup,
    .shutdown   = imx_uart_shutdown,
    .flush_buffer   = imx_uart_flush_buffer,
    .set_termios    = imx_uart_set_termios,
    .type       = imx_uart_type,
    .config_port    = imx_uart_config_port,
    .verify_port    = imx_uart_verify_port,
#if defined(CONFIG_CONSOLE_POLL)
    .poll_init      = imx_uart_poll_init,
    .poll_get_char  = imx_uart_poll_get_char,
    .poll_put_char  = imx_uart_poll_put_char,
#endif
};
1961 
1962 static struct imx_port *imx_uart_ports[UART_NR];
1963 
1964 #if IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE)
1965 static void imx_uart_console_putchar(struct uart_port *port, unsigned char ch)
1966 {
1967     struct imx_port *sport = (struct imx_port *)port;
1968 
1969     while (imx_uart_readl(sport, imx_uart_uts_reg(sport)) & UTS_TXFULL)
1970         barrier();
1971 
1972     imx_uart_writel(sport, ch, URTX0);
1973 }
1974 
1975 /*
1976  * Interrupts are disabled on entering
1977  */
1978 static void
1979 imx_uart_console_write(struct console *co, const char *s, unsigned int count)
1980 {
1981     struct imx_port *sport = imx_uart_ports[co->index];
1982     struct imx_port_ucrs old_ucr;
1983     unsigned long flags;
1984     unsigned int ucr1;
1985     int locked = 1;
1986 
1987     if (sport->port.sysrq)
1988         locked = 0;
1989     else if (oops_in_progress)
1990         locked = spin_trylock_irqsave(&sport->port.lock, flags);
1991     else
1992         spin_lock_irqsave(&sport->port.lock, flags);
1993 
1994     /*
1995      *  First, save UCR1/2/3 and then disable interrupts
1996      */
1997     imx_uart_ucrs_save(sport, &old_ucr);
1998     ucr1 = old_ucr.ucr1;
1999 
2000     if (imx_uart_is_imx1(sport))
2001         ucr1 |= IMX1_UCR1_UARTCLKEN;
2002     ucr1 |= UCR1_UARTEN;
2003     ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN);
2004 
2005     imx_uart_writel(sport, ucr1, UCR1);
2006 
2007     imx_uart_writel(sport, old_ucr.ucr2 | UCR2_TXEN, UCR2);
2008 
2009     uart_console_write(&sport->port, s, count, imx_uart_console_putchar);
2010 
2011     /*
2012      *  Finally, wait for transmitter to become empty
2013      *  and restore UCR1/2/3
2014      */
2015     while (!(imx_uart_readl(sport, USR2) & USR2_TXDC));
2016 
2017     imx_uart_ucrs_restore(sport, &old_ucr);
2018 
2019     if (locked)
2020         spin_unlock_irqrestore(&sport->port.lock, flags);
2021 }
2022 
2023 /*
2024  * If the port was already initialised (eg, by a boot loader),
2025  * try to determine the current setup.
2026  */
2027 static void
2028 imx_uart_console_get_options(struct imx_port *sport, int *baud,
2029                  int *parity, int *bits)
2030 {
2031 
2032     if (imx_uart_readl(sport, UCR1) & UCR1_UARTEN) {
2033         /* ok, the port was enabled */
2034         unsigned int ucr2, ubir, ubmr, uartclk;
2035         unsigned int baud_raw;
2036         unsigned int ucfr_rfdiv;
2037 
2038         ucr2 = imx_uart_readl(sport, UCR2);
2039 
2040         *parity = 'n';
2041         if (ucr2 & UCR2_PREN) {
2042             if (ucr2 & UCR2_PROE)
2043                 *parity = 'o';
2044             else
2045                 *parity = 'e';
2046         }
2047 
2048         if (ucr2 & UCR2_WS)
2049             *bits = 8;
2050         else
2051             *bits = 7;
2052 
2053         ubir = imx_uart_readl(sport, UBIR) & 0xffff;
2054         ubmr = imx_uart_readl(sport, UBMR) & 0xffff;
2055 
2056         ucfr_rfdiv = (imx_uart_readl(sport, UFCR) & UFCR_RFDIV) >> 7;
2057         if (ucfr_rfdiv == 6)
2058             ucfr_rfdiv = 7;
2059         else
2060             ucfr_rfdiv = 6 - ucfr_rfdiv;
2061 
2062         uartclk = clk_get_rate(sport->clk_per);
2063         uartclk /= ucfr_rfdiv;
2064 
2065         {   /*
2066              * The next code provides exact computation of
2067              *   baud_raw = round(((uartclk/16) * (ubir + 1)) / (ubmr + 1))
2068              * without need of float support or long long division,
2069              * which would be required to prevent 32bit arithmetic overflow
2070              */
2071             unsigned int mul = ubir + 1;
2072             unsigned int div = 16 * (ubmr + 1);
2073             unsigned int rem = uartclk % div;
2074 
2075             baud_raw = (uartclk / div) * mul;
2076             baud_raw += (rem * mul + div / 2) / div;
2077             *baud = (baud_raw + 50) / 100 * 100;
2078         }
2079 
2080         if (*baud != baud_raw)
2081             dev_info(sport->port.dev, "Console IMX rounded baud rate from %d to %d\n",
2082                 baud_raw, *baud);
2083     }
2084 }
2085 
/*
 * Console setup: pick the port, parse or probe the line settings and
 * program them. On success both clocks are left enabled until
 * imx_uart_console_exit() releases them.
 */
static int
imx_uart_console_setup(struct console *co, char *options)
{
    struct imx_port *sport;
    int baud = 9600;
    int bits = 8;
    int parity = 'n';
    int flow = 'n';
    int retval;

    /*
     * Check whether an invalid uart number has been specified, and
     * if so, search for the first available port that does have
     * console support.
     */
    if (co->index == -1 || co->index >= ARRAY_SIZE(imx_uart_ports))
        co->index = 0;
    sport = imx_uart_ports[co->index];
    if (sport == NULL)
        return -ENODEV;

    /* For setting the registers, we only need to enable the ipg clock. */
    retval = clk_prepare_enable(sport->clk_ipg);
    if (retval)
        goto error_console;

    if (options)
        uart_parse_options(options, &baud, &parity, &bits, &flow);
    else
        /* No options given: read the current setup from the hardware. */
        imx_uart_console_get_options(sport, &baud, &parity, &bits);

    imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);

    retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);

    if (retval) {
        clk_disable_unprepare(sport->clk_ipg);
        goto error_console;
    }

    retval = clk_prepare_enable(sport->clk_per);
    if (retval)
        clk_disable_unprepare(sport->clk_ipg);

error_console:
    return retval;
}
2133 
2134 static int
2135 imx_uart_console_exit(struct console *co)
2136 {
2137     struct imx_port *sport = imx_uart_ports[co->index];
2138 
2139     clk_disable_unprepare(sport->clk_per);
2140     clk_disable_unprepare(sport->clk_ipg);
2141 
2142     return 0;
2143 }
2144 
/* Forward declaration: the console's .data points at the driver below. */
static struct uart_driver imx_uart_uart_driver;
static struct console imx_uart_console = {
    .name       = DEV_NAME,
    .write      = imx_uart_console_write,
    .device     = uart_console_device,
    .setup      = imx_uart_console_setup,
    .exit       = imx_uart_console_exit,
    .flags      = CON_PRINTBUFFER,
    .index      = -1,   /* -1: selected via the kernel command line */
    .data       = &imx_uart_uart_driver,
};
2156 
2157 #define IMX_CONSOLE &imx_uart_console
2158 
2159 #else
2160 #define IMX_CONSOLE NULL
2161 #endif
2162 
/* serial-core driver object shared by all i.MX UART ports. */
static struct uart_driver imx_uart_uart_driver = {
    .owner          = THIS_MODULE,
    .driver_name    = DRIVER_NAME,
    .dev_name       = DEV_NAME,
    .major          = SERIAL_IMX_MAJOR,
    .minor          = MINOR_START,
    .nr             = ARRAY_SIZE(imx_uart_ports),
    .cons           = IMX_CONSOLE,
};
2172 
2173 static enum hrtimer_restart imx_trigger_start_tx(struct hrtimer *t)
2174 {
2175     struct imx_port *sport = container_of(t, struct imx_port, trigger_start_tx);
2176     unsigned long flags;
2177 
2178     spin_lock_irqsave(&sport->port.lock, flags);
2179     if (sport->tx_state == WAIT_AFTER_RTS)
2180         imx_uart_start_tx(&sport->port);
2181     spin_unlock_irqrestore(&sport->port.lock, flags);
2182 
2183     return HRTIMER_NORESTART;
2184 }
2185 
2186 static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
2187 {
2188     struct imx_port *sport = container_of(t, struct imx_port, trigger_stop_tx);
2189     unsigned long flags;
2190 
2191     spin_lock_irqsave(&sport->port.lock, flags);
2192     if (sport->tx_state == WAIT_AFTER_SEND)
2193         imx_uart_stop_tx(&sport->port);
2194     spin_unlock_irqrestore(&sport->port.lock, flags);
2195 
2196     return HRTIMER_NORESTART;
2197 }
2198 
static const struct serial_rs485 imx_no_rs485 = {}; /* No RS485 if no RTS */
/* RS485 capabilities advertised when RTS (native or GPIO) is available. */
static const struct serial_rs485 imx_rs485_supported = {
    .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
         SER_RS485_RX_DURING_TX,
    .delay_rts_before_send = 1,
    .delay_rts_after_send = 1,
};
2206 
2207 /* Default RX DMA buffer configuration */
2208 #define RX_DMA_PERIODS      16
2209 #define RX_DMA_PERIOD_LEN   (PAGE_SIZE / 4)
2210 
2211 static int imx_uart_probe(struct platform_device *pdev)
2212 {
2213     struct device_node *np = pdev->dev.of_node;
2214     struct imx_port *sport;
2215     void __iomem *base;
2216     u32 dma_buf_conf[2];
2217     int ret = 0;
2218     u32 ucr1;
2219     struct resource *res;
2220     int txirq, rxirq, rtsirq;
2221 
2222     sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
2223     if (!sport)
2224         return -ENOMEM;
2225 
2226     sport->devdata = of_device_get_match_data(&pdev->dev);
2227 
2228     ret = of_alias_get_id(np, "serial");
2229     if (ret < 0) {
2230         dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
2231         return ret;
2232     }
2233     sport->port.line = ret;
2234 
2235     if (of_get_property(np, "uart-has-rtscts", NULL) ||
2236         of_get_property(np, "fsl,uart-has-rtscts", NULL) /* deprecated */)
2237         sport->have_rtscts = 1;
2238 
2239     if (of_get_property(np, "fsl,dte-mode", NULL))
2240         sport->dte_mode = 1;
2241 
2242     if (of_get_property(np, "rts-gpios", NULL))
2243         sport->have_rtsgpio = 1;
2244 
2245     if (of_get_property(np, "fsl,inverted-tx", NULL))
2246         sport->inverted_tx = 1;
2247 
2248     if (of_get_property(np, "fsl,inverted-rx", NULL))
2249         sport->inverted_rx = 1;
2250 
2251     if (!of_property_read_u32_array(np, "fsl,dma-info", dma_buf_conf, 2)) {
2252         sport->rx_period_length = dma_buf_conf[0];
2253         sport->rx_periods = dma_buf_conf[1];
2254     } else {
2255         sport->rx_period_length = RX_DMA_PERIOD_LEN;
2256         sport->rx_periods = RX_DMA_PERIODS;
2257     }
2258 
2259     if (sport->port.line >= ARRAY_SIZE(imx_uart_ports)) {
2260         dev_err(&pdev->dev, "serial%d out of range\n",
2261             sport->port.line);
2262         return -EINVAL;
2263     }
2264 
2265     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2266     base = devm_ioremap_resource(&pdev->dev, res);
2267     if (IS_ERR(base))
2268         return PTR_ERR(base);
2269 
2270     rxirq = platform_get_irq(pdev, 0);
2271     if (rxirq < 0)
2272         return rxirq;
2273     txirq = platform_get_irq_optional(pdev, 1);
2274     rtsirq = platform_get_irq_optional(pdev, 2);
2275 
2276     sport->port.dev = &pdev->dev;
2277     sport->port.mapbase = res->start;
2278     sport->port.membase = base;
2279     sport->port.type = PORT_IMX;
2280     sport->port.iotype = UPIO_MEM;
2281     sport->port.irq = rxirq;
2282     sport->port.fifosize = 32;
2283     sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE);
2284     sport->port.ops = &imx_uart_pops;
2285     sport->port.rs485_config = imx_uart_rs485_config;
2286     /* RTS is required to control the RS485 transmitter */
2287     if (sport->have_rtscts || sport->have_rtsgpio)
2288         sport->port.rs485_supported = imx_rs485_supported;
2289     else
2290         sport->port.rs485_supported = imx_no_rs485;
2291     sport->port.flags = UPF_BOOT_AUTOCONF;
2292     timer_setup(&sport->timer, imx_uart_timeout, 0);
2293 
2294     sport->gpios = mctrl_gpio_init(&sport->port, 0);
2295     if (IS_ERR(sport->gpios))
2296         return PTR_ERR(sport->gpios);
2297 
2298     sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2299     if (IS_ERR(sport->clk_ipg)) {
2300         ret = PTR_ERR(sport->clk_ipg);
2301         dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
2302         return ret;
2303     }
2304 
2305     sport->clk_per = devm_clk_get(&pdev->dev, "per");
2306     if (IS_ERR(sport->clk_per)) {
2307         ret = PTR_ERR(sport->clk_per);
2308         dev_err(&pdev->dev, "failed to get per clk: %d\n", ret);
2309         return ret;
2310     }
2311 
2312     sport->port.uartclk = clk_get_rate(sport->clk_per);
2313 
2314     /* For register access, we only need to enable the ipg clock. */
2315     ret = clk_prepare_enable(sport->clk_ipg);
2316     if (ret) {
2317         dev_err(&pdev->dev, "failed to enable per clk: %d\n", ret);
2318         return ret;
2319     }
2320 
2321     /* initialize shadow register values */
2322     sport->ucr1 = readl(sport->port.membase + UCR1);
2323     sport->ucr2 = readl(sport->port.membase + UCR2);
2324     sport->ucr3 = readl(sport->port.membase + UCR3);
2325     sport->ucr4 = readl(sport->port.membase + UCR4);
2326     sport->ufcr = readl(sport->port.membase + UFCR);
2327 
2328     ret = uart_get_rs485_mode(&sport->port);
2329     if (ret) {
2330         clk_disable_unprepare(sport->clk_ipg);
2331         return ret;
2332     }
2333 
2334     if (sport->port.rs485.flags & SER_RS485_ENABLED &&
2335         (!sport->have_rtscts && !sport->have_rtsgpio))
2336         dev_err(&pdev->dev, "no RTS control, disabling rs485\n");
2337 
2338     /*
2339      * If using the i.MX UART RTS/CTS control then the RTS (CTS_B)
2340      * signal cannot be set low during transmission in case the
2341      * receiver is off (limitation of the i.MX UART IP).
2342      */
2343     if (sport->port.rs485.flags & SER_RS485_ENABLED &&
2344         sport->have_rtscts && !sport->have_rtsgpio &&
2345         (!(sport->port.rs485.flags & SER_RS485_RTS_ON_SEND) &&
2346          !(sport->port.rs485.flags & SER_RS485_RX_DURING_TX)))
2347         dev_err(&pdev->dev,
2348             "low-active RTS not possible when receiver is off, enabling receiver\n");
2349 
2350     uart_rs485_config(&sport->port);
2351 
2352     /* Disable interrupts before requesting them */
2353     ucr1 = imx_uart_readl(sport, UCR1);
2354     ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN | UCR1_RTSDEN);
2355     imx_uart_writel(sport, ucr1, UCR1);
2356 
2357     if (!imx_uart_is_imx1(sport) && sport->dte_mode) {
2358         /*
2359          * The DCEDTE bit changes the direction of DSR, DCD, DTR and RI
2360          * and influences if UCR3_RI and UCR3_DCD changes the level of RI
2361          * and DCD (when they are outputs) or enables the respective
2362          * irqs. So set this bit early, i.e. before requesting irqs.
2363          */
2364         u32 ufcr = imx_uart_readl(sport, UFCR);
2365         if (!(ufcr & UFCR_DCEDTE))
2366             imx_uart_writel(sport, ufcr | UFCR_DCEDTE, UFCR);
2367 
2368         /*
2369          * Disable UCR3_RI and UCR3_DCD irqs. They are also not
2370          * enabled later because they cannot be cleared
2371          * (confirmed on i.MX25) which makes them unusable.
2372          */
2373         imx_uart_writel(sport,
2374                 IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP | UCR3_DSR,
2375                 UCR3);
2376 
2377     } else {
2378         u32 ucr3 = UCR3_DSR;
2379         u32 ufcr = imx_uart_readl(sport, UFCR);
2380         if (ufcr & UFCR_DCEDTE)
2381             imx_uart_writel(sport, ufcr & ~UFCR_DCEDTE, UFCR);
2382 
2383         if (!imx_uart_is_imx1(sport))
2384             ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
2385         imx_uart_writel(sport, ucr3, UCR3);
2386     }
2387 
2388     clk_disable_unprepare(sport->clk_ipg);
2389 
2390     hrtimer_init(&sport->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2391     hrtimer_init(&sport->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2392     sport->trigger_start_tx.function = imx_trigger_start_tx;
2393     sport->trigger_stop_tx.function = imx_trigger_stop_tx;
2394 
2395     /*
2396      * Allocate the IRQ(s) i.MX1 has three interrupts whereas later
2397      * chips only have one interrupt.
2398      */
2399     if (txirq > 0) {
2400         ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_rxint, 0,
2401                        dev_name(&pdev->dev), sport);
2402         if (ret) {
2403             dev_err(&pdev->dev, "failed to request rx irq: %d\n",
2404                 ret);
2405             return ret;
2406         }
2407 
2408         ret = devm_request_irq(&pdev->dev, txirq, imx_uart_txint, 0,
2409                        dev_name(&pdev->dev), sport);
2410         if (ret) {
2411             dev_err(&pdev->dev, "failed to request tx irq: %d\n",
2412                 ret);
2413             return ret;
2414         }
2415 
2416         ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
2417                        dev_name(&pdev->dev), sport);
2418         if (ret) {
2419             dev_err(&pdev->dev, "failed to request rts irq: %d\n",
2420                 ret);
2421             return ret;
2422         }
2423     } else {
2424         ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
2425                        dev_name(&pdev->dev), sport);
2426         if (ret) {
2427             dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
2428             return ret;
2429         }
2430     }
2431 
2432     imx_uart_ports[sport->port.line] = sport;
2433 
2434     platform_set_drvdata(pdev, sport);
2435 
2436     return uart_add_one_port(&imx_uart_uart_driver, &sport->port);
2437 }
2438 
2439 static int imx_uart_remove(struct platform_device *pdev)
2440 {
2441     struct imx_port *sport = platform_get_drvdata(pdev);
2442 
2443     return uart_remove_one_port(&imx_uart_uart_driver, &sport->port);
2444 }
2445 
/* Rewrite the registers saved by imx_uart_save_context() after resume. */
static void imx_uart_restore_context(struct imx_port *sport)
{
    unsigned long flags;

    spin_lock_irqsave(&sport->port.lock, flags);
    /* Nothing to do if no context was saved before this resume. */
    if (!sport->context_saved) {
        spin_unlock_irqrestore(&sport->port.lock, flags);
        return;
    }

    /* Setup registers are restored first, control registers last. */
    imx_uart_writel(sport, sport->saved_reg[4], UFCR);
    imx_uart_writel(sport, sport->saved_reg[5], UESC);
    imx_uart_writel(sport, sport->saved_reg[6], UTIM);
    imx_uart_writel(sport, sport->saved_reg[7], UBIR);
    imx_uart_writel(sport, sport->saved_reg[8], UBMR);
    imx_uart_writel(sport, sport->saved_reg[9], IMX21_UTS);
    imx_uart_writel(sport, sport->saved_reg[0], UCR1);
    /*
     * NOTE(review): UCR2_SRST is ORed in here, presumably because the
     * software-reset bit is active-low and must stay set to avoid
     * resetting the UART — confirm against the i.MX reference manual.
     */
    imx_uart_writel(sport, sport->saved_reg[1] | UCR2_SRST, UCR2);
    imx_uart_writel(sport, sport->saved_reg[2], UCR3);
    imx_uart_writel(sport, sport->saved_reg[3], UCR4);
    sport->context_saved = false;
    spin_unlock_irqrestore(&sport->port.lock, flags);
}
2469 
2470 static void imx_uart_save_context(struct imx_port *sport)
2471 {
2472     unsigned long flags;
2473 
2474     /* Save necessary regs */
2475     spin_lock_irqsave(&sport->port.lock, flags);
2476     sport->saved_reg[0] = imx_uart_readl(sport, UCR1);
2477     sport->saved_reg[1] = imx_uart_readl(sport, UCR2);
2478     sport->saved_reg[2] = imx_uart_readl(sport, UCR3);
2479     sport->saved_reg[3] = imx_uart_readl(sport, UCR4);
2480     sport->saved_reg[4] = imx_uart_readl(sport, UFCR);
2481     sport->saved_reg[5] = imx_uart_readl(sport, UESC);
2482     sport->saved_reg[6] = imx_uart_readl(sport, UTIM);
2483     sport->saved_reg[7] = imx_uart_readl(sport, UBIR);
2484     sport->saved_reg[8] = imx_uart_readl(sport, UBMR);
2485     sport->saved_reg[9] = imx_uart_readl(sport, IMX21_UTS);
2486     sport->context_saved = true;
2487     spin_unlock_irqrestore(&sport->port.lock, flags);
2488 }
2489 
2490 static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
2491 {
2492     u32 ucr3;
2493 
2494     ucr3 = imx_uart_readl(sport, UCR3);
2495     if (on) {
2496         imx_uart_writel(sport, USR1_AWAKE, USR1);
2497         ucr3 |= UCR3_AWAKEN;
2498     } else {
2499         ucr3 &= ~UCR3_AWAKEN;
2500     }
2501     imx_uart_writel(sport, ucr3, UCR3);
2502 
2503     if (sport->have_rtscts) {
2504         u32 ucr1 = imx_uart_readl(sport, UCR1);
2505         if (on) {
2506             imx_uart_writel(sport, USR1_RTSD, USR1);
2507             ucr1 |= UCR1_RTSDEN;
2508         } else {
2509             ucr1 &= ~UCR1_RTSDEN;
2510         }
2511         imx_uart_writel(sport, ucr1, UCR1);
2512     }
2513 }
2514 
2515 static int imx_uart_suspend_noirq(struct device *dev)
2516 {
2517     struct imx_port *sport = dev_get_drvdata(dev);
2518 
2519     imx_uart_save_context(sport);
2520 
2521     clk_disable(sport->clk_ipg);
2522 
2523     pinctrl_pm_select_sleep_state(dev);
2524 
2525     return 0;
2526 }
2527 
2528 static int imx_uart_resume_noirq(struct device *dev)
2529 {
2530     struct imx_port *sport = dev_get_drvdata(dev);
2531     int ret;
2532 
2533     pinctrl_pm_select_default_state(dev);
2534 
2535     ret = clk_enable(sport->clk_ipg);
2536     if (ret)
2537         return ret;
2538 
2539     imx_uart_restore_context(sport);
2540 
2541     return 0;
2542 }
2543 
2544 static int imx_uart_suspend(struct device *dev)
2545 {
2546     struct imx_port *sport = dev_get_drvdata(dev);
2547     int ret;
2548 
2549     uart_suspend_port(&imx_uart_uart_driver, &sport->port);
2550     disable_irq(sport->port.irq);
2551 
2552     ret = clk_prepare_enable(sport->clk_ipg);
2553     if (ret)
2554         return ret;
2555 
2556     /* enable wakeup from i.MX UART */
2557     imx_uart_enable_wakeup(sport, true);
2558 
2559     return 0;
2560 }
2561 
2562 static int imx_uart_resume(struct device *dev)
2563 {
2564     struct imx_port *sport = dev_get_drvdata(dev);
2565 
2566     /* disable wakeup from i.MX UART */
2567     imx_uart_enable_wakeup(sport, false);
2568 
2569     uart_resume_port(&imx_uart_uart_driver, &sport->port);
2570     enable_irq(sport->port.irq);
2571 
2572     clk_disable_unprepare(sport->clk_ipg);
2573 
2574     return 0;
2575 }
2576 
2577 static int imx_uart_freeze(struct device *dev)
2578 {
2579     struct imx_port *sport = dev_get_drvdata(dev);
2580 
2581     uart_suspend_port(&imx_uart_uart_driver, &sport->port);
2582 
2583     return clk_prepare_enable(sport->clk_ipg);
2584 }
2585 
2586 static int imx_uart_thaw(struct device *dev)
2587 {
2588     struct imx_port *sport = dev_get_drvdata(dev);
2589 
2590     uart_resume_port(&imx_uart_uart_driver, &sport->port);
2591 
2592     clk_disable_unprepare(sport->clk_ipg);
2593 
2594     return 0;
2595 }
2596 
/*
 * PM callbacks: suspend/resume arm the wakeup logic, the *_noirq hooks
 * save/restore the register context. Hibernation (freeze/restore)
 * reuses the same noirq pair.
 */
static const struct dev_pm_ops imx_uart_pm_ops = {
    .suspend_noirq = imx_uart_suspend_noirq,
    .resume_noirq = imx_uart_resume_noirq,
    .freeze_noirq = imx_uart_suspend_noirq,
    .restore_noirq = imx_uart_resume_noirq,
    .suspend = imx_uart_suspend,
    .resume = imx_uart_resume,
    .freeze = imx_uart_freeze,
    .thaw = imx_uart_thaw,
    .restore = imx_uart_thaw,
};
2608 
/* Platform glue; matched against the compatibles in imx_uart_dt_ids. */
static struct platform_driver imx_uart_platform_driver = {
    .probe = imx_uart_probe,
    .remove = imx_uart_remove,

    .driver = {
        .name = "imx-uart",
        .of_match_table = imx_uart_dt_ids,
        .pm = &imx_uart_pm_ops,
    },
};
2619 
2620 static int __init imx_uart_init(void)
2621 {
2622     int ret = uart_register_driver(&imx_uart_uart_driver);
2623 
2624     if (ret)
2625         return ret;
2626 
2627     ret = platform_driver_register(&imx_uart_platform_driver);
2628     if (ret != 0)
2629         uart_unregister_driver(&imx_uart_uart_driver);
2630 
2631     return ret;
2632 }
2633 
2634 static void __exit imx_uart_exit(void)
2635 {
2636     platform_driver_unregister(&imx_uart_platform_driver);
2637     uart_unregister_driver(&imx_uart_uart_driver);
2638 }
2639 
2640 module_init(imx_uart_init);
2641 module_exit(imx_uart_exit);
2642 
2643 MODULE_AUTHOR("Sascha Hauer");
2644 MODULE_DESCRIPTION("IMX generic serial port driver");
2645 MODULE_LICENSE("GPL");
2646 MODULE_ALIAS("platform:imx-uart");