0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019 #include <linux/module.h>
0020 #include <linux/ioport.h>
0021 #include <linux/init.h>
0022 #include <linux/console.h>
0023 #include <linux/sysrq.h>
0024 #include <linux/device.h>
0025 #include <linux/tty.h>
0026 #include <linux/tty_flip.h>
0027 #include <linux/serial_core.h>
0028 #include <linux/serial.h>
0029 #include <linux/amba/bus.h>
0030 #include <linux/amba/serial.h>
0031 #include <linux/clk.h>
0032 #include <linux/slab.h>
0033 #include <linux/dmaengine.h>
0034 #include <linux/dma-mapping.h>
0035 #include <linux/scatterlist.h>
0036 #include <linux/delay.h>
0037 #include <linux/types.h>
0038 #include <linux/of.h>
0039 #include <linux/of_device.h>
0040 #include <linux/pinctrl/consumer.h>
0041 #include <linux/sizes.h>
0042 #include <linux/io.h>
0043 #include <linux/acpi.h>
0044
0045 #define UART_NR 14
0046
0047 #define SERIAL_AMBA_MAJOR 204
0048 #define SERIAL_AMBA_MINOR 64
0049 #define SERIAL_AMBA_NR UART_NR
0050
0051 #define AMBA_ISR_PASS_LIMIT 256
0052
0053 #define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
0054 #define UART_DUMMY_DR_RX (1 << 16)
0055
/*
 * Logical register indexes.  Each index is translated to a real MMIO
 * offset through a per-vendor table (see pl011_std_offsets and
 * pl011_st_offsets) by pl011_reg_to_offset().
 */
enum {
	REG_DR,
	REG_ST_DMAWM,		/* ST variants only */
	REG_ST_TIMEOUT,		/* ST variants only */
	REG_FR,
	REG_LCRH_RX,
	REG_LCRH_TX,
	REG_IBRD,
	REG_FBRD,
	REG_CR,
	REG_IFLS,
	REG_IMSC,
	REG_RIS,
	REG_MIS,
	REG_ICR,
	REG_DMACR,
	REG_ST_XFCR,
	REG_ST_XON1,
	REG_ST_XON2,
	REG_ST_XOFF1,
	REG_ST_XOFF2,
	REG_ST_ITCR,
	REG_ST_ITIP,
	REG_ST_ABCR,
	REG_ST_ABIMSC,

	/* The size of the array - must be last */
	REG_ARRAY_SIZE,
};
0085
/* Register offsets for the standard ARM PL011 implementation. */
static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = UART011_LCRH,
	[REG_LCRH_TX] = UART011_LCRH,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
};
0101
0102
/*
 * Per-vendor quirk and layout description; one instance per supported
 * PL011 derivative (ARM, ST, SBSA, ...).
 */
struct vendor_data {
	const u16 *reg_offset;		/* logical reg index -> MMIO offset */
	unsigned int ifls;		/* default FIFO interrupt levels */
	unsigned int fr_busy;		/* FR bit used as "transmitter busy" */
	unsigned int fr_dsr;		/* FR bit for DSR */
	unsigned int fr_cts;		/* FR bit for CTS */
	unsigned int fr_ri;		/* FR bit for RI */
	unsigned int inv_fr;		/* FR bits with inverted polarity */
	bool access_32b;		/* use 32-bit register accesses (UPIO_MEM32) */
	bool oversampling;		/* presumably: supports oversampling control - not used in this chunk */
	bool dma_threshold;		/* has the ST DMA watermark register (REG_ST_DMAWM) */
	bool cts_event_workaround;	/* NOTE(review): semantics not visible in this chunk */
	bool always_enabled;		/* presumably: UART must stay enabled - not used in this chunk */
	bool fixed_options;		/* presumably: baud/format fixed by firmware - not used in this chunk */

	unsigned int (*get_fifosize)(struct amba_device *dev);	/* FIFO depth in bytes */
};
0120
/* FIFO depth for the ARM implementation: 16 bytes before rev 3, 32 after. */
static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	if (amba_rev(dev) < 3)
		return 16;

	return 32;
}
0125
/* Vendor data for the standard ARM PL011. */
static struct vendor_data vendor_arm = {
	.reg_offset = pl011_std_offsets,
	.ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fr_busy = UART01x_FR_BUSY,
	.fr_dsr = UART01x_FR_DSR,
	.fr_cts = UART01x_FR_CTS,
	.fr_ri = UART011_FR_RI,
	.oversampling = false,
	.dma_threshold = false,
	.cts_event_workaround = false,
	.always_enabled = false,
	.fixed_options = false,
	.get_fifosize = get_fifosize_arm,
};
0140
/*
 * Vendor data for the ARM SBSA generic UART: 32-bit register access,
 * always enabled, fixed line options; no get_fifosize hook.
 */
static const struct vendor_data vendor_sbsa = {
	.reg_offset = pl011_std_offsets,
	.fr_busy = UART01x_FR_BUSY,
	.fr_dsr = UART01x_FR_DSR,
	.fr_cts = UART01x_FR_CTS,
	.fr_ri = UART011_FR_RI,
	.access_32b = true,
	.oversampling = false,
	.dma_threshold = false,
	.cts_event_workaround = false,
	.always_enabled = true,
	.fixed_options = true,
};
0154
0155 #ifdef CONFIG_ACPI_SPCR_TABLE
/*
 * Vendor data for the Qualcomm QDF2400 (erratum 44): TXFE, with
 * inverted polarity (see .inv_fr), stands in for the busy flag.
 */
static const struct vendor_data vendor_qdt_qdf2400_e44 = {
	.reg_offset = pl011_std_offsets,
	.fr_busy = UART011_FR_TXFE,
	.fr_dsr = UART01x_FR_DSR,
	.fr_cts = UART01x_FR_CTS,
	.fr_ri = UART011_FR_RI,
	.inv_fr = UART011_FR_TXFE,
	.access_32b = true,
	.oversampling = false,
	.dma_threshold = false,
	.cts_event_workaround = false,
	.always_enabled = true,
	.fixed_options = true,
};
0170 #endif
0171
/*
 * Register offsets for the ST Micro derivatives, which split LCRH into
 * RX/TX halves and add a number of extra registers (DMAWM, XON/XOFF,
 * autobaud, ...).
 */
static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_ST_DMAWM] = ST_UART011_DMAWM,
	[REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = ST_UART011_LCRH_RX,
	[REG_LCRH_TX] = ST_UART011_LCRH_TX,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
	[REG_ST_XFCR] = ST_UART011_XFCR,
	[REG_ST_XON1] = ST_UART011_XON1,
	[REG_ST_XON2] = ST_UART011_XON2,
	[REG_ST_XOFF1] = ST_UART011_XOFF1,
	[REG_ST_XOFF2] = ST_UART011_XOFF2,
	[REG_ST_ITCR] = ST_UART011_ITCR,
	[REG_ST_ITIP] = ST_UART011_ITIP,
	[REG_ST_ABCR] = ST_UART011_ABCR,
	[REG_ST_ABIMSC] = ST_UART011_ABIMSC,
};
0198
/* All ST derivatives have a fixed 64-byte FIFO; @dev is unused. */
static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}
0203
/*
 * Vendor data for the ST Micro derivatives: uses the extended register
 * table and enables oversampling, the DMA watermark register and the
 * CTS event workaround.
 */
static struct vendor_data vendor_st = {
	.reg_offset = pl011_st_offsets,
	.ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.fr_busy = UART01x_FR_BUSY,
	.fr_dsr = UART01x_FR_DSR,
	.fr_cts = UART01x_FR_CTS,
	.fr_ri = UART011_FR_RI,
	.oversampling = true,
	.dma_threshold = true,
	.cts_event_workaround = true,
	.always_enabled = false,
	.fixed_options = false,
	.get_fifosize = get_fifosize_st,
};
0218
0219
0220
/* One DMA receive buffer: its single-entry sg list plus CPU mapping. */
struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;	/* coherent buffer of PL011_DMA_BUFFER_SIZE bytes */
};
0225
/* State for DMA-driven reception, double-buffered between sgbuf_a/b. */
struct pl011_dmarx_data {
	struct dma_chan *chan;
	struct completion complete;
	bool use_buf_b;			/* which buffer the next transfer fills */
	struct pl011_sgbuf sgbuf_a;
	struct pl011_sgbuf sgbuf_b;
	dma_cookie_t cookie;		/* cookie of the in-flight transfer */
	bool running;			/* an RX DMA job is in flight */
	struct timer_list timer;	/* polling timer, see pl011_dma_rx_poll() */
	unsigned int last_residue;	/* residue observed on the previous poll */
	unsigned long last_jiffies;	/* time RX progress was last observed */
	bool auto_poll_rate;		/* poll rate chosen automatically */
	unsigned int poll_rate;		/* poll interval in ms */
	unsigned int poll_timeout;	/* ms without data before giving up polling */
};
0241
/* State for a single in-flight DMA transmit. */
struct pl011_dmatx_data {
	struct dma_chan *chan;
	struct scatterlist sg;
	char *buf;	/* bounce buffer of PL011_DMA_BUFFER_SIZE bytes */
	bool queued;	/* a TX descriptor is currently submitted */
};
0248
0249
0250
0251
/*
 * Driver-private state for one PL011 port, wrapping the generic
 * struct uart_port.
 */
struct uart_amba_port {
	struct uart_port port;
	const u16 *reg_offset;		/* cached vendor register-offset table */
	struct clk *clk;
	const struct vendor_data *vendor;
	unsigned int dmacr;		/* software shadow of REG_DMACR */
	unsigned int im;		/* software shadow of the interrupt mask (REG_IMSC) */
	unsigned int old_status;
	unsigned int fifosize;		/* vendor-specific FIFO size */
	unsigned int fixed_baud;	/* vendor-set fixed baud rate */
	char type[12];
	bool rs485_tx_started;		/* an RS-485 transmission is in progress */
	unsigned int rs485_tx_drain_interval;	/* microseconds per drain poll (udelay) */
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool using_tx_dma;
	bool using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data dmatx;
	bool dma_probed;		/* pl011_dma_probe() has been attempted */
#endif
};
0274
0275 static unsigned int pl011_tx_empty(struct uart_port *port);
0276
0277 static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
0278 unsigned int reg)
0279 {
0280 return uap->reg_offset[reg];
0281 }
0282
0283 static unsigned int pl011_read(const struct uart_amba_port *uap,
0284 unsigned int reg)
0285 {
0286 void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);
0287
0288 return (uap->port.iotype == UPIO_MEM32) ?
0289 readl_relaxed(addr) : readw_relaxed(addr);
0290 }
0291
/*
 * Write a logical register, honouring the port's access width:
 * 32-bit for UPIO_MEM32 variants, 16-bit otherwise.
 */
static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
		writel_relaxed(val, addr);
	else
		writew_relaxed(val, addr);
}
0302
0303
0304
0305
0306
0307
/*
 * Drain up to 256 characters from the RX FIFO into the TTY layer,
 * classifying break/parity/framing/overrun errors along the way.
 * Returns the number of loop iterations consumed.
 *
 * Called with the port lock held; the lock is dropped around
 * uart_handle_sysrq_char() (which may recurse into the console).
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	unsigned int ch, flag, fifotaken;
	int sysrq;
	u16 status;

	for (fifotaken = 0; fifotaken != 256; fifotaken++) {
		status = pl011_read(uap, REG_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				/* Break: FE/PE are meaningless, clear them */
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			/* Keep only the error bits the user asked to see */
			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		/* sysrq handling may sleep/recurse - drop the lock around it */
		spin_unlock(&uap->port.lock);
		sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
		spin_lock(&uap->port.lock);

		if (!sysrq)
			uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}
0357
0358
0359
0360
0361
0362
0363
0364 #ifdef CONFIG_DMA_ENGINE
0365
0366 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
0367
/*
 * Allocate a coherent DMA buffer for one RX sgbuf and initialise its
 * single-entry scatterlist to cover it.  Returns 0 or -ENOMEM.
 *
 * NOTE(review): @dir is unused here - direction is implied by the
 * coherent allocation; confirm whether the parameter is kept only for
 * symmetry with pl011_sgbuf_free().
 */
static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_table(&sg->sg, 1);
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;
	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;

	return 0;
}
0386
0387 static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
0388 enum dma_data_direction dir)
0389 {
0390 if (sg->buf) {
0391 dma_free_coherent(chan->device->dev,
0392 PL011_DMA_BUFFER_SIZE, sg->buf,
0393 sg_dma_address(&sg->sg));
0394 }
0395 }
0396
0397 static void pl011_dma_probe(struct uart_amba_port *uap)
0398 {
0399
0400 struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
0401 struct device *dev = uap->port.dev;
0402 struct dma_slave_config tx_conf = {
0403 .dst_addr = uap->port.mapbase +
0404 pl011_reg_to_offset(uap, REG_DR),
0405 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
0406 .direction = DMA_MEM_TO_DEV,
0407 .dst_maxburst = uap->fifosize >> 1,
0408 .device_fc = false,
0409 };
0410 struct dma_chan *chan;
0411 dma_cap_mask_t mask;
0412
0413 uap->dma_probed = true;
0414 chan = dma_request_chan(dev, "tx");
0415 if (IS_ERR(chan)) {
0416 if (PTR_ERR(chan) == -EPROBE_DEFER) {
0417 uap->dma_probed = false;
0418 return;
0419 }
0420
0421
0422 if (!plat || !plat->dma_filter) {
0423 dev_info(uap->port.dev, "no DMA platform data\n");
0424 return;
0425 }
0426
0427
0428 dma_cap_zero(mask);
0429 dma_cap_set(DMA_SLAVE, mask);
0430
0431 chan = dma_request_channel(mask, plat->dma_filter,
0432 plat->dma_tx_param);
0433 if (!chan) {
0434 dev_err(uap->port.dev, "no TX DMA channel!\n");
0435 return;
0436 }
0437 }
0438
0439 dmaengine_slave_config(chan, &tx_conf);
0440 uap->dmatx.chan = chan;
0441
0442 dev_info(uap->port.dev, "DMA channel TX %s\n",
0443 dma_chan_name(uap->dmatx.chan));
0444
0445
0446 chan = dma_request_slave_channel(dev, "rx");
0447
0448 if (!chan && plat && plat->dma_rx_param) {
0449 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
0450
0451 if (!chan) {
0452 dev_err(uap->port.dev, "no RX DMA channel!\n");
0453 return;
0454 }
0455 }
0456
0457 if (chan) {
0458 struct dma_slave_config rx_conf = {
0459 .src_addr = uap->port.mapbase +
0460 pl011_reg_to_offset(uap, REG_DR),
0461 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
0462 .direction = DMA_DEV_TO_MEM,
0463 .src_maxburst = uap->fifosize >> 2,
0464 .device_fc = false,
0465 };
0466 struct dma_slave_caps caps;
0467
0468
0469
0470
0471
0472
0473 if (0 == dma_get_slave_caps(chan, &caps)) {
0474 if (caps.residue_granularity ==
0475 DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
0476 dma_release_channel(chan);
0477 dev_info(uap->port.dev,
0478 "RX DMA disabled - no residue processing\n");
0479 return;
0480 }
0481 }
0482 dmaengine_slave_config(chan, &rx_conf);
0483 uap->dmarx.chan = chan;
0484
0485 uap->dmarx.auto_poll_rate = false;
0486 if (plat && plat->dma_rx_poll_enable) {
0487
0488 if (plat->dma_rx_poll_rate) {
0489 uap->dmarx.auto_poll_rate = false;
0490 uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
0491 } else {
0492
0493
0494
0495
0496
0497 uap->dmarx.auto_poll_rate = true;
0498 uap->dmarx.poll_rate = 100;
0499 }
0500
0501 if (plat->dma_rx_poll_timeout)
0502 uap->dmarx.poll_timeout =
0503 plat->dma_rx_poll_timeout;
0504 else
0505 uap->dmarx.poll_timeout = 3000;
0506 } else if (!plat && dev->of_node) {
0507 uap->dmarx.auto_poll_rate = of_property_read_bool(
0508 dev->of_node, "auto-poll");
0509 if (uap->dmarx.auto_poll_rate) {
0510 u32 x;
0511
0512 if (0 == of_property_read_u32(dev->of_node,
0513 "poll-rate-ms", &x))
0514 uap->dmarx.poll_rate = x;
0515 else
0516 uap->dmarx.poll_rate = 100;
0517 if (0 == of_property_read_u32(dev->of_node,
0518 "poll-timeout-ms", &x))
0519 uap->dmarx.poll_timeout = x;
0520 else
0521 uap->dmarx.poll_timeout = 3000;
0522 }
0523 }
0524 dev_info(uap->port.dev, "DMA channel RX %s\n",
0525 dma_chan_name(uap->dmarx.chan));
0526 }
0527 }
0528
0529 static void pl011_dma_remove(struct uart_amba_port *uap)
0530 {
0531 if (uap->dmatx.chan)
0532 dma_release_channel(uap->dmatx.chan);
0533 if (uap->dmarx.chan)
0534 dma_release_channel(uap->dmarx.chan);
0535 }
0536
0537
0538 static int pl011_dma_tx_refill(struct uart_amba_port *uap);
0539 static void pl011_start_tx_pio(struct uart_amba_port *uap);
0540
0541
0542
0543
0544
/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer, or fall back to IRQ-driven PIO.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * If TX DMA was already disabled (the saved dmacr had TXDMAE
	 * clear), the driver stopped DMA for some reason on its own;
	 * likewise if TX is stopped or the circ buffer is empty there
	 * is nothing more to send.  In all of those cases just mark
	 * the transfer finished so future refills can happen.
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0)
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent.  Re-enable the TX IRQ.
		 */
		pl011_start_tx_pio(uap);

	spin_unlock_irqrestore(&uap->port.lock, flags);
}
0586
0587
0588
0589
0590
0591
0592
0593
0594
/*
 * Try to refill the TX DMA buffer from the circ buffer and submit
 * a new descriptor.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA (too little data)
 *  <0 on error (mapping or descriptor preparation failed)
 *
 * NOTE(review): callers visible here invoke this with the port lock
 * held - confirm that holds for all call sites.
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Avoid the DMA setup overhead when the pending data fits in
	 * the first half of the FIFO - plain interrupt handling is
	 * cheaper for such small transfers.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Hold back one character so it goes out via the TX IRQ path;
	 * this keeps the interrupt-driven path alive for follow-ups
	 * (e.g. an XON that must not be blocked behind the DMA burst).
	 */
	count -= 1;

	/* Cap at the size of the bounce buffer */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	/* Copy out of the circ buffer, in up to two pieces if it wraps */
	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second;

		if (first > count)
			first = count;
		second = count - first;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}

	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * The DMA engine cannot take a descriptor right now;
		 * callers fall back to IRQ-driven transmission.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}
0688
0689
0690
0691
0692
0693
0694
0695
0696
/*
 * Handle a TX interrupt while DMA transmission is in use.
 * Returns:
 *   false if the caller should transmit via PIO
 *   true if a DMA buffer is (still) queued and the TX IRQ was masked
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * A TX buffer is already queued, so this interrupt likely
	 * follows a PIO send (e.g. an X-char).  Re-enable TX DMA and
	 * mask the TX IRQ so DMA can continue.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}

	/*
	 * No TX buffer queued - try to queue one.  On success, mask the
	 * TX IRQ; on failure, let the caller do PIO.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}
	return false;
}
0726
0727
0728
0729
0730
/*
 * Stop an in-flight DMA transmit by clearing TXDMAE; the queued
 * descriptor itself is left alone (its callback cleans up).
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}
0738
0739
0740
0741
0742
0743
0744
0745
0746
/*
 * Try to start a DMA transmit; if an X-char (XON/XOFF) is pending,
 * push that out by PIO first.
 * Returns:
 *   false if the caller should enable the TX IRQ (PIO path)
 *   true if transmission is being handled here
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				pl011_write(uap->im, uap, REG_IMSC);
			} else
				ret = false;
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			pl011_write(uap->dmacr, uap, REG_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA so it cannot load the
	 * TX FIFO, then see whether we can stuff the X-char in directly.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO; return false so the TX IRQ gets
		 * enabled and tells us when space appears.  Note DMA is
		 * left disabled (uap->dmacr already has TXDMAE clear).
		 */
		return false;
	}

	pl011_write(uap->port.x_char, uap, REG_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	pl011_write(dmacr, uap, REG_DMACR);

	return true;
}
0798
0799
0800
0801
0802
/*
 * Flush the transmit buffer: abort any in-flight TX DMA and unmap the
 * queued scatterlist.
 *
 * NOTE(review): the __releases/__acquires annotations suggest the port
 * lock is dropped here, but this body never touches it - confirm
 * whether the annotations are stale.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	dmaengine_terminate_async(uap->dmatx.chan);

	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}
0823
0824 static void pl011_dma_rx_callback(void *data);
0825
/*
 * Start a receive DMA transfer into the currently selected half of the
 * double buffer.  On success RX DMA is enabled and the RX interrupt is
 * masked (DMA takes over reception); returns 0.  Returns a negative
 * errno if there is no channel or no descriptor could be prepared, in
 * which case callers fall back to interrupt mode.
 */
static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a descriptor,
	 * no big deal: the caller will fall back to interrupt mode
	 * as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = true;

	/* RX is now handled by DMA - mask the RX interrupt */
	uap->im &= ~UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

	return 0;
}
0868
0869
0870
0871
0872
0873
/*
 * Push received characters from the DMA buffer (and optionally the
 * FIFO) into the TTY layer.  Called when either the DMA job completed
 * or a FIFO timeout interrupt occurred; must be called with the port
 * lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0;	/* only used for the vdbg message */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* Some chars may already have been taken by polling */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick the remaining data from the DMA buffer */
	if (pending) {

		/*
		 * First take all chars in the DMA pipe, then look in the
		 * FIFO.  tty_insert_flip_string() may return fewer bytes
		 * than requested if the TTY buffer is full.
		 */
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset last_residue for the next RX DMA poll cycle */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = sgbuf->sg.length;

	/*
	 * Only continue reading the FIFO if all DMA chars have been
	 * taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
			    UART011_FEIS, uap, REG_ICR);

		/*
		 * An incomplete DMA buffer can be due to an RX error or a
		 * plain timeout - drain any pending chars from the FIFO
		 * and let pl011_fifo_to_tty() classify error conditions
		 * (errors stop the DMA job, so they always surface in the
		 * FIFO rather than in the DMA buffer).
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
}
0944
/*
 * Handle an RX timeout while a DMA transfer is in flight: pause the
 * transfer to obtain a trustworthy residue, hand over whatever arrived
 * so far, then flip buffers and retrigger DMA (falling back to
 * interrupt mode if that fails).
 */
static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter; do
	 * this before disabling the UART-side DMA enable, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = false;

	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert them into
	 * the tty framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}
0992
/*
 * Completion callback for an RX DMA transfer: the buffer filled up
 * before a timeout fired.  Retrigger DMA into the other half of the
 * double buffer first, then flush the finished buffer to the TTY.
 */
static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	spin_lock_irq(&uap->port.lock);
	/*
	 * RX data may also have been taken by the UART interrupt path
	 * in the meantime, so compute how much is actually pending from
	 * the residue rather than assuming a full buffer.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 * NOTE(review): uap->im is modified here outside the port lock,
	 * unlike every other site in this file - confirm this is safe
	 * against a concurrent interrupt handler.
	 */
	if (ret) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}
1040
1041
1042
1043
1044
1045
/*
 * Stop DMA reception by clearing RXDMAE; any in-flight descriptor is
 * left to its owner to terminate.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
}
1052
1053
1054
1055
1056
1057
/*
 * Timer handler for RX DMA polling: read the transfer residue to see
 * how far DMA has progressed, push any new bytes to the TTY, then
 * either re-arm the timer or - after poll_timeout ms with no data -
 * abandon DMA and fall back to RX interrupts.
 */
static void pl011_dma_rx_poll(struct timer_list *t)
{
	struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_sgbuf *sgbuf;
	int dma_count;
	struct dma_tx_state state;

	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		/* residue shrank: new bytes landed since the last poll */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				size);
		/* only advance if everything fit into the TTY buffer */
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data arrived within poll_timeout, fall back to interrupt
	 * mode; DMA will be retriggered on the next RX interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {

		/* Switch to interrupt mode */
		spin_lock_irqsave(&uap->port.lock, flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		spin_unlock_irqrestore(&uap->port.lock, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}
1105
/*
 * Allocate DMA buffers and kick off the initial RX DMA job.  Every
 * step fails soft: a missing channel or failed allocation leaves the
 * corresponding using_tx_dma/using_rx_dma flag false and the port in
 * PIO mode.
 */
static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dma_probed)
		pl011_dma_probe(uap);

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * ST Micro variants have a specific DMA burst threshold
	 * compensation register; set it to 16 bytes so bursts are only
	 * issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
			    uap, REG_ST_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev, "could not trigger initial "
				"RX DMA job, fall back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
			mod_timer(&uap->dmarx.timer,
				  jiffies +
				  msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}
1181
/*
 * Tear down DMA: wait for the transmitter to drain, disable all DMA
 * in DMACR, terminate in-flight jobs and free buffers/timers.
 */
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/*
	 * Wait for the busy flag to clear before disabling DMA.
	 * NOTE(review): this busy-wait has no timeout - confirm the
	 * flag always clears even on wedged hardware.
	 */
	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
		cpu_relax();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	pl011_write(uap->dmacr, uap, REG_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}
1219
/* True when RX DMA buffers were set up successfully at startup. */
static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

/* True when an RX DMA transfer is currently in flight. */
static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}
1229
1230 #else
1231
/* Blank stubs used when the kernel is built without CONFIG_DMA_ENGINE. */
static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL
1282 #endif
1283
/*
 * Finish an RS-485 transmission: wait for the hardware TX queue to
 * drain, honour delay_rts_after_send, then flip RTS per the RS-485
 * flags and switch the UART from transmit back to receive.
 */
static void pl011_rs485_tx_stop(struct uart_amba_port *uap)
{
	/*
	 * To be on the safe side, only time out after twice as many
	 * iterations as the FIFO size.
	 */
	const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2;
	struct uart_port *port = &uap->port;
	int i = 0;
	u32 cr;

	/* Wait until the hardware TX queue is empty */
	while (!pl011_tx_empty(port)) {
		if (i > MAX_TX_DRAIN_ITERS) {
			dev_warn(port->dev,
				 "timeout while draining hardware tx queue\n");
			break;
		}

		udelay(uap->rs485_tx_drain_interval);
		i++;
	}

	if (port->rs485.delay_rts_after_send)
		mdelay(port->rs485.delay_rts_after_send);

	cr = pl011_read(uap, REG_CR);

	if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
		cr &= ~UART011_CR_RTS;
	else
		cr |= UART011_CR_RTS;

	/* Disable the transmitter and reenable the receiver */
	cr &= ~UART011_CR_TXE;
	cr |= UART011_CR_RXE;
	pl011_write(cr, uap, REG_CR);

	uap->rs485_tx_started = false;
}
1324
/* Mask the TX interrupt, stop TX DMA and finish any RS-485 transmission. */
static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_dma_tx_stop(uap);

	if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started)
		pl011_rs485_tx_stop(uap);
}
1337
1338 static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);
1339
1340
/*
 * Start transmission using programmed I/O: push what fits now and,
 * if data remains, unmask the TX interrupt to continue.
 */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
	if (pl011_tx_chars(uap, false)) {
		uap->im |= UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}
1348
/* Kick off transmission, preferring DMA and falling back to PIO. */
static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!pl011_dma_tx_start(uap))
		pl011_start_tx_pio(uap);
}
1357
/*
 * serial_core stop_rx hook: mask all receive and receive-error
 * interrupts and stop RX DMA.
 */
static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	pl011_write(uap->im, uap, REG_IMSC);

	pl011_dma_rx_stop(uap);
}
1369
/* Throttle: stop reception under the port lock; undone by unthrottle. */
static void pl011_throttle_rx(struct uart_port *port)
{
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	pl011_stop_rx(port);
	spin_unlock_irqrestore(&port->lock, flags);
}
1378
/* Enable modem-status (RI/CTS/DCD/DSR change) interrupts. */
static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	pl011_write(uap->im, uap, REG_IMSC);
}
1387
/*
 * Drain the RX FIFO into the tty layer.  The port lock is dropped around
 * tty_flip_buffer_push() and reacquired before returning, as annotated.
 */
static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
				"fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			pl011_write(uap->im, uap, REG_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start RX DMA poll timer if it isn't running yet */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					  jiffies +
					  msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	spin_lock(&uap->port.lock);
}
1421
1422 static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
1423 bool from_irq)
1424 {
1425 if (unlikely(!from_irq) &&
1426 pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
1427 return false;
1428
1429 pl011_write(c, uap, REG_DR);
1430 uap->port.icount.tx++;
1431
1432 return true;
1433 }
1434
/* Enter RS-485 transmit mode: enable TX, assert RTS, honour delays. */
static void pl011_rs485_tx_start(struct uart_amba_port *uap)
{
	struct uart_port *port = &uap->port;
	u32 cr;

	/* Enable transmitter */
	cr = pl011_read(uap, REG_CR);
	cr |= UART011_CR_TXE;

	/* Disable receiver if half-duplex */
	if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
		cr &= ~UART011_CR_RXE;

	if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
		cr &= ~UART011_CR_RTS;
	else
		cr |= UART011_CR_RTS;

	pl011_write(cr, uap, REG_CR);

	if (port->rs485.delay_rts_before_send)
		mdelay(port->rs485.delay_rts_before_send);

	uap->rs485_tx_started = true;
}
1460
1461
/*
 * Feed characters from the serial-core xmit ring into the TX FIFO.
 * Returns true if the TX interrupt should stay enabled (more data may
 * be pending), false once the ring is empty or TX is stopped.
 */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count = uap->fifosize >> 1;

	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
			return true;
		uap->port.x_char = 0;
		--count;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return false;
	}

	if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
	    !uap->rs485_tx_started)
		pl011_rs485_tx_start(uap);

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return true;

	do {
		/* On the IRQ path only refill up to half the FIFO depth */
		if (likely(from_irq) && count-- == 0)
			break;

		if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
			break;

		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	} while (!uart_circ_empty(xmit));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit)) {
		pl011_stop_tx(&uap->port);
		return false;
	}
	return true;
}
1505
/*
 * Report modem-status line changes (DCD/DSR/CTS) to the serial core,
 * based on the delta between the current and previously seen FR bits.
 */
static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & uap->vendor->fr_dsr)
		uap->port.icount.dsr++;

	if (delta & uap->vendor->fr_cts)
		uart_handle_cts_change(&uap->port,
				       status & uap->vendor->fr_cts);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}
1530
/*
 * Vendor workaround for spurious CTS-change interrupts: perform a dummy
 * ICR write and a couple of dummy reads so the write has reached the
 * hardware before the ISR samples the status registers.
 */
static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
	if (!uap->vendor->cts_event_workaround)
		return;

	/* workaround to make sure that all bits are unlocked.. */
	pl011_write(0x00, uap, REG_ICR);

	/*
	 * WA: introduce 26ns(1 uart clk) delay before W1C;
	 * single apb access will incur 2 pclk(133.12Mhz) delay,
	 * so add 2 dummy reads.
	 */
	pl011_read(uap, REG_ICR);
	pl011_read(uap, REG_ICR);
}
1547
/*
 * Interrupt handler: loop over the masked raw interrupt status, servicing
 * RX (PIO or DMA), modem-status and TX events, bounded by
 * AMBA_ISR_PASS_LIMIT to avoid livelocking on a chatty device.
 */
static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;

	spin_lock_irqsave(&uap->port.lock, flags);
	status = pl011_read(uap, REG_RIS) & uap->im;
	if (status) {
		do {
			check_apply_cts_event_workaround(uap);

			/* ACK everything except RX/TX; those clear themselves */
			pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
					       UART011_RXIS),
				    uap, REG_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap, true);

			if (pass_counter-- == 0)
				break;

			status = pl011_read(uap, REG_RIS) & uap->im;
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}
1589
/*
 * Report whether transmission has fully completed.  The FR value is
 * XORed with the vendor's inverted-bits mask so BUSY/TXFF read with
 * consistent polarity across variants.
 */
static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	/* Allow feature register bits to be inverted to work around errata */
	unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;

	return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
							0 : TIOCSER_TEMT;
}
1601
1602 static unsigned int pl011_get_mctrl(struct uart_port *port)
1603 {
1604 struct uart_amba_port *uap =
1605 container_of(port, struct uart_amba_port, port);
1606 unsigned int result = 0;
1607 unsigned int status = pl011_read(uap, REG_FR);
1608
1609 #define TIOCMBIT(uartbit, tiocmbit) \
1610 if (status & uartbit) \
1611 result |= tiocmbit
1612
1613 TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
1614 TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
1615 TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
1616 TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
1617 #undef TIOCMBIT
1618 return result;
1619 }
1620
1621 static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
1622 {
1623 struct uart_amba_port *uap =
1624 container_of(port, struct uart_amba_port, port);
1625 unsigned int cr;
1626
1627 cr = pl011_read(uap, REG_CR);
1628
1629 #define TIOCMBIT(tiocmbit, uartbit) \
1630 if (mctrl & tiocmbit) \
1631 cr |= uartbit; \
1632 else \
1633 cr &= ~uartbit
1634
1635 TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
1636 TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
1637 TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
1638 TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
1639 TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
1640
1641 if (port->status & UPSTAT_AUTORTS) {
1642
1643 TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
1644 }
1645 #undef TIOCMBIT
1646
1647 pl011_write(cr, uap, REG_CR);
1648 }
1649
1650 static void pl011_break_ctl(struct uart_port *port, int break_state)
1651 {
1652 struct uart_amba_port *uap =
1653 container_of(port, struct uart_amba_port, port);
1654 unsigned long flags;
1655 unsigned int lcr_h;
1656
1657 spin_lock_irqsave(&uap->port.lock, flags);
1658 lcr_h = pl011_read(uap, REG_LCRH_TX);
1659 if (break_state == -1)
1660 lcr_h |= UART01x_LCRH_BRK;
1661 else
1662 lcr_h &= ~UART01x_LCRH_BRK;
1663 pl011_write(lcr_h, uap, REG_LCRH_TX);
1664 spin_unlock_irqrestore(&uap->port.lock, flags);
1665 }
1666
1667 #ifdef CONFIG_CONSOLE_POLL
1668
/*
 * Quiesce interrupts for polled (kgdb) operation: acknowledge whatever
 * is currently pending and mask TX.
 */
static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even w/o the race, e.g.
	 * controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
		    REG_IMSC);
}
1691
/*
 * Non-blocking polled read: return the next RX byte or NO_POLL_CHAR if
 * the RX FIFO is empty.
 */
static int pl011_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status;

	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
	pl011_quiesce_irqs(port);

	status = pl011_read(uap, REG_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return pl011_read(uap, REG_DR);
}
1710
/* Polled write: busy-wait for TX FIFO space, then emit one character. */
static void pl011_put_poll_char(struct uart_port *port,
		unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();

	pl011_write(ch, uap, REG_DR);
}
1722
1723 #endif
1724
/*
 * Power up the UART: select pinctrl default state, enable the clock,
 * clear stale error/receive interrupts, latch the current interrupt
 * mask, and run any board-specific platform init hook.
 */
static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	/* Optionally enable pins to be muxed in and configured */
	pinctrl_pm_select_default_state(port->dev);

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		return retval;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
		    UART011_FEIS | UART011_RTIS | UART011_RXIS,
		    uap, REG_ICR);

	/*
	 * Save interrupts enable mask, and enable RX interrupts in case if
	 * the interrupt is used for NMI entry.
	 */
	uap->im = pl011_read(uap, REG_IMSC);
	pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}
	return 0;
}
1764
/* True when this variant has separate RX and TX line-control registers. */
static bool pl011_split_lcrh(const struct uart_amba_port *uap)
{
	return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
	       pl011_reg_to_offset(uap, REG_LCRH_TX);
}
1770
/* Write the line-control value, handling split LCRH_RX/LCRH_TX variants. */
static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
	pl011_write(lcr_h, uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap)) {
		int i;
		/*
		 * Wait 10 PCLKs before writing LCRH_TX register,
		 * to get this delay write read only register 10 times
		 */
		for (i = 0; i < 10; ++i)
			pl011_write(0xff, uap, REG_MIS);
		pl011_write(lcr_h, uap, REG_LCRH_TX);
	}
}
1785
/* Program the saved interrupt mask and claim the (shared) UART IRQ. */
static int pl011_allocate_irq(struct uart_amba_port *uap)
{
	pl011_write(uap->im, uap, REG_IMSC);

	return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap);
}
1792
1793
1794
1795
1796
1797
/*
 * Enable RX and receive-timeout interrupts (RX is left to DMA when a
 * DMA transfer is already running).
 */
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&uap->port.lock, flags);

	/* Clear out any spuriously appearing RX interrupts */
	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);

	/*
	 * RXIS is asserted only when the RX FIFO transitions from below
	 * to above the trigger threshold.  If the RX FIFO is already
	 * full to the threshold this can't happen and RXIS will now be
	 * stuck off.  Drain the RX FIFO explicitly to fix this
	 * (bounded to twice the FIFO depth as a safety net).
	 */
	for (i = 0; i < uap->fifosize * 2; ++i) {
		if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
			break;

		pl011_read(uap, REG_DR);
	}

	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}
1827
/* Unthrottle: re-enable the receive interrupts masked by throttle. */
static void pl011_unthrottle_rx(struct uart_port *port)
{
	struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);

	pl011_enable_interrupts(uap);
}
1834
1835 static int pl011_startup(struct uart_port *port)
1836 {
1837 struct uart_amba_port *uap =
1838 container_of(port, struct uart_amba_port, port);
1839 unsigned int cr;
1840 int retval;
1841
1842 retval = pl011_hwinit(port);
1843 if (retval)
1844 goto clk_dis;
1845
1846 retval = pl011_allocate_irq(uap);
1847 if (retval)
1848 goto clk_dis;
1849
1850 pl011_write(uap->vendor->ifls, uap, REG_IFLS);
1851
1852 spin_lock_irq(&uap->port.lock);
1853
1854 cr = pl011_read(uap, REG_CR);
1855 cr &= UART011_CR_RTS | UART011_CR_DTR;
1856 cr |= UART01x_CR_UARTEN | UART011_CR_RXE;
1857
1858 if (!(port->rs485.flags & SER_RS485_ENABLED))
1859 cr |= UART011_CR_TXE;
1860
1861 pl011_write(cr, uap, REG_CR);
1862
1863 spin_unlock_irq(&uap->port.lock);
1864
1865
1866
1867
1868 uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;
1869
1870
1871 pl011_dma_startup(uap);
1872
1873 pl011_enable_interrupts(uap);
1874
1875 return 0;
1876
1877 clk_dis:
1878 clk_disable_unprepare(uap->clk);
1879 return retval;
1880 }
1881
/*
 * SBSA UART startup: same as pl011_startup() minus the CR programming —
 * SBSA firmware owns the control register.
 */
static int sbsa_uart_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		return retval;

	retval = pl011_allocate_irq(uap);
	if (retval)
		return retval;

	/* The SBSA UART does not support any modem status lines. */
	uap->old_status = 0;

	pl011_enable_interrupts(uap);

	return 0;
}
1903
1904 static void pl011_shutdown_channel(struct uart_amba_port *uap,
1905 unsigned int lcrh)
1906 {
1907 unsigned long val;
1908
1909 val = pl011_read(uap, lcrh);
1910 val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
1911 pl011_write(val, uap, lcrh);
1912 }
1913
1914
1915
1916
1917
1918
/*
 * disable the port. It should not disable RTS and DTR.
 * Also RTS and DTR state should be preserved to restore
 * it during startup().
 */
static void pl011_disable_uart(struct uart_amba_port *uap)
{
	unsigned int cr;

	uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	spin_lock_irq(&uap->port.lock);
	cr = pl011_read(uap, REG_CR);
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);
	spin_unlock_irq(&uap->port.lock);

	/*
	 * disable break condition and fifos
	 */
	pl011_shutdown_channel(uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap))
		pl011_shutdown_channel(uap, REG_LCRH_TX);
}
1938
/* Mask all interrupts and acknowledge anything still pending. */
static void pl011_disable_interrupts(struct uart_amba_port *uap)
{
	spin_lock_irq(&uap->port.lock);

	/* mask all interrupts and clear all pending ones */
	uap->im = 0;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	spin_unlock_irq(&uap->port.lock);
}
1950
/*
 * serial_core shutdown hook: quiesce interrupts and DMA, leave RS-485
 * transmit mode if active, free the IRQ, disable the UART, and power
 * down the clock/pins.  Teardown order mirrors startup in reverse.
 */
static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	pl011_dma_shutdown(uap);

	if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started)
		pl011_rs485_tx_stop(uap);

	free_irq(uap->port.irq, uap);

	pl011_disable_uart(uap);

	/*
	 * Shut down the clock producer
	 */
	clk_disable_unprepare(uap->clk);
	/* Optionally let pins go into sleep states */
	pinctrl_pm_select_sleep_state(port->dev);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->exit)
			plat->exit();
	}

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}
1985
/* SBSA shutdown: mask interrupts and release the IRQ; firmware owns CR. */
static void sbsa_uart_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	free_irq(uap->port.irq, uap);

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}
1998
/*
 * Build the read/ignore status masks applied to each received character
 * from the termios input-mode flags.
 */
static void
pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
{
	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;
}
2030
/*
 * serial_core set_termios hook: compute the baud divisor, build the
 * line-control value and program the hardware.  NOTE(review): the
 * register write order (FBRD/IBRD before LCRH, LCRH before CR) is
 * mandated by the PL011 hardware — do not reorder.
 */
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		  struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;
	unsigned int bits;

	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);
#ifdef CONFIG_DMA_ENGINE
	/*
	 * Adjust RX DMA polling rate with baud rate if not specified.
	 */
	if (uap->dmarx.auto_poll_rate)
		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif

	if (baud > port->uartclk/16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: // CS8
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
		if (termios->c_cflag & CMSPAR)
			lcr_h |= UART011_LCRH_SPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;

	bits = tty_get_frame_size(termios->c_cflag);

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/*
	 * Calculate the approximated time it takes to transmit one character
	 * with the given baud rate. We use this as the poll interval when we
	 * wait for the tx queue to empty.
	 */
	uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud);

	pl011_setup_status_masks(port, termios);

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* RS-485 drives RTS itself, so CRTSCTS cannot be honoured */
	if (port->rs485.flags & SER_RS485_ENABLED)
		termios->c_cflag &= ~CRTSCTS;

	old_cr = pl011_read(uap, REG_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	/*
	 * Workaround for the ST Micro oversampling variants to
	 * increase the bitrate slightly, by lowering the divisor,
	 * to avoid delayed sampling of start bit at high speeds,
	 * else we see data corruption.
	 */
	if (uap->vendor->oversampling) {
		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
			quot -= 1;
		else if ((baud > 3250000) && (quot > 2))
			quot -= 2;
	}
	/* Set baud rate */
	pl011_write(quot & 0x3f, uap, REG_FBRD);
	pl011_write(quot >> 6, uap, REG_IBRD);

	/*
	 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
	 * REG_FBRD & REG_IBRD — the baud divisor is only latched by
	 * an LCRH write.
	 */
	pl011_write_lcr_h(uap, lcr_h);
	pl011_write(old_cr, uap, REG_CR);

	spin_unlock_irqrestore(&port->lock, flags);
}
2162
/*
 * SBSA set_termios: the UART runs at a firmware-fixed baud rate and
 * 8N1 framing, so force the termios to the only supported mode.
 */
static void
sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
		      struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;

	tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);

	/* The SBSA UART only supports 8n1 without hardware flow control. */
	termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
	termios->c_cflag &= ~(CMSPAR | CRTSCTS);
	termios->c_cflag |= CS8 | CLOCAL;

	spin_lock_irqsave(&port->lock, flags);
	uart_update_timeout(port, CS8, uap->fixed_baud);
	pl011_setup_status_masks(port, termios);
	spin_unlock_irqrestore(&port->lock, flags);
}
2183
2184 static const char *pl011_type(struct uart_port *port)
2185 {
2186 struct uart_amba_port *uap =
2187 container_of(port, struct uart_amba_port, port);
2188 return uap->port.type == PORT_AMBA ? uap->type : NULL;
2189 }
2190
2191
2192
2193
/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE)
		port->type = PORT_AMBA;
}
2199
2200
2201
2202
2203 static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
2204 {
2205 int ret = 0;
2206 if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
2207 ret = -EINVAL;
2208 if (ser->irq < 0 || ser->irq >= nr_irqs)
2209 ret = -EINVAL;
2210 if (ser->baud_base < 9600)
2211 ret = -EINVAL;
2212 if (port->mapbase != (unsigned long) ser->iomem_base)
2213 ret = -EINVAL;
2214 return ret;
2215 }
2216
/*
 * rs485_config hook: leave transmit mode if RS-485 was active, and when
 * enabling RS-485 make sure hardware auto-RTS is off since RTS is then
 * driven by software to control the transceiver.
 */
static int pl011_rs485_config(struct uart_port *port, struct ktermios *termios,
			      struct serial_rs485 *rs485)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	if (port->rs485.flags & SER_RS485_ENABLED)
		pl011_rs485_tx_stop(uap);

	/* Make sure auto RTS is disabled */
	if (rs485->flags & SER_RS485_ENABLED) {
		u32 cr = pl011_read(uap, REG_CR);

		cr &= ~UART011_CR_RTSEN;
		pl011_write(cr, uap, REG_CR);
		port->status &= ~UPSTAT_AUTORTS;
	}

	return 0;
}
2237
/* serial_core operations for the full-featured AMBA PL011 UART. */
static const struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl011_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.throttle	= pl011_throttle_rx,
	.unthrottle	= pl011_unthrottle_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init     = pl011_hwinit,
	.poll_get_char = pl011_get_poll_char,
	.poll_put_char = pl011_put_poll_char,
#endif
};
2262
/* SBSA UARTs expose no modem-control outputs: nothing to do. */
static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
2266
/* SBSA UARTs expose no modem-status inputs: report none asserted. */
static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
{
	return 0;
}
2271
/* serial_core operations for the reduced SBSA (ACPI "generic") UART. */
static const struct uart_ops sbsa_uart_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= sbsa_uart_set_mctrl,
	.get_mctrl	= sbsa_uart_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.startup	= sbsa_uart_startup,
	.shutdown	= sbsa_uart_shutdown,
	.set_termios	= sbsa_uart_set_termios,
	.type		= pl011_type,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init     = pl011_hwinit,
	.poll_get_char = pl011_get_poll_char,
	.poll_put_char = pl011_put_poll_char,
#endif
};
2291
static struct uart_amba_port *amba_ports[UART_NR];	/* registered ports, indexed by line number */
2293
2294 #ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
2295
/* Console character output: spin until the TX FIFO has room, then write. */
static void pl011_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();
	pl011_write(ch, uap, REG_DR);
}
2305
/*
 * Console write: may be entered from any context (including oops), so
 * the port lock is taken best-effort — skipped under sysrq, trylocked
 * while an oops is in progress.
 */
static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int old_cr = 0, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 * First save the CR then disable the interrupts
	 */
	if (!uap->vendor->always_enabled) {
		old_cr = pl011_read(uap, REG_CR);
		new_cr = old_cr & ~UART011_CR_CTSEN;
		new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
		pl011_write(new_cr, uap, REG_CR);
	}

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty and restore the
	 * TCR. Allow feature register bits to be inverted to work around
	 * errata.
	 */
	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
						& uap->vendor->fr_busy)
		cpu_relax();
	if (!uap->vendor->always_enabled)
		pl011_write(old_cr, uap, REG_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}
2353
/*
 * Read back baud/parity/word-length from the hardware when the
 * bootloader left the UART enabled, so the console can keep its setup.
 */
static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
				      int *parity, int *bits)
{
	if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = pl011_read(uap, REG_LCRH_TX);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = pl011_read(uap, REG_IBRD);
		fbrd = pl011_read(uap, REG_FBRD);

		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			if (pl011_read(uap, REG_CR)
				  & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}
2387
/*
 * Console setup: locate the port, power it minimally, and apply either
 * the user-supplied options, the vendor's fixed settings, or whatever
 * the bootloader programmed.
 */
static int pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (uap->vendor->fixed_options) {
		baud = uap->fixed_baud;
	} else {
		if (options)
			uart_parse_options(options,
					   &baud, &parity, &bits, &flow);
		else
			pl011_console_get_options(uap, &baud, &parity, &bits);
	}

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
/*
 * Match a "console=pl011,mmio,<addr>[,options]" (or "qdf2400_e44")
 * earlycon-style specification against a registered port by MMIO
 * address, and set up the console on a match.
 */
static int pl011_console_match(struct console *co, char *name, int idx,
			       char *options)
{
	unsigned char iotype;
	resource_size_t addr;
	int i;

	/*
	 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
	 * have a distinct console name, so make sure we check for that.
	 * The actual implementation of the erratum occurs in the probe
	 * function.
	 */
	if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
		return -ENODEV;

	if (uart_parse_earlycon(options, &iotype, &addr, &options))
		return -ENODEV;

	if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
		return -ENODEV;

	/* try to match the port at the given MMIO address */
	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		struct uart_port *port;

		if (!amba_ports[i])
			continue;

		port = &amba_ports[i]->port;

		if (port->mapbase != addr)
			continue;

		co->index = i;
		port->cons = co;
		return pl011_console_setup(co, options);
	}

	return -ENODEV;
}
2497
static struct uart_driver amba_reg;	/* forward declaration; defined below */

/* Kernel console attached to the ttyAMA tty driver. */
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.match		= pl011_console_match,
	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
	.index		= -1,
	.data		= &amba_reg,
};
2509
2510 #define AMBA_CONSOLE (&amba_console)
2511
/*
 * Early-console putc for the Qualcomm QDF2400 E44 erratum: after each
 * character, also wait for the FIFO to empty completely (TXFE), not
 * merely for TXFF to clear.
 */
static void qdf2400_e44_putc(struct uart_port *port, unsigned char c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	writel(c, port->membase + UART01x_DR);
	while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE))
		cpu_relax();
}
2520
/* Early-console write using the QDF2400 E44 erratum putc. */
static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
}
2527
/*
 * Early-console putc: wait for FIFO space, write the byte using the
 * access width the earlycon was declared with, then wait until the
 * UART is no longer busy.
 */
static void pl011_putc(struct uart_port *port, unsigned char c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	if (port->iotype == UPIO_MEM32)
		writel(c, port->membase + UART01x_DR);
	else
		writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
		cpu_relax();
}
2539
/* Early-console write for standard PL011 hardware. */
static void pl011_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, pl011_putc);
}
2546
2547 #ifdef CONFIG_CONSOLE_POLL
2548 static int pl011_getc(struct uart_port *port)
2549 {
2550 if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE)
2551 return NO_POLL_CHAR;
2552
2553 if (port->iotype == UPIO_MEM32)
2554 return readl(port->membase + UART01x_DR);
2555 else
2556 return readb(port->membase + UART01x_DR);
2557 }
2558
2559 static int pl011_early_read(struct console *con, char *s, unsigned int n)
2560 {
2561 struct earlycon_device *dev = con->data;
2562 int ch, num_read = 0;
2563
2564 while (num_read < n) {
2565 ch = pl011_getc(&dev->port);
2566 if (ch == NO_POLL_CHAR)
2567 break;
2568
2569 s[num_read++] = ch;
2570 }
2571
2572 return num_read;
2573 }
2574 #else
2575 #define pl011_early_read NULL
2576 #endif
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
/*
 * Earlycon setup for "arm,pl011" / "arm,sbsa-uart": install the PL011
 * write (and, with CONFIG_CONSOLE_POLL, read) callbacks.
 */
static int __init pl011_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;
	device->con->read = pl011_early_read;

	return 0;
}
2601 OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
2602 OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
/*
 * Earlycon setup for QDF2400 E44 affected hardware: only the erratum
 * write path is installed (no polled read).
 */
static int __init
qdf2400_e44_early_console_setup(struct earlycon_device *device,
				const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = qdf2400_e44_early_write;
	return 0;
}
2624 EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);
2625
2626 #else
2627 #define AMBA_CONSOLE NULL
2628 #endif
2629
/* tty driver registration data for the ttyAMA device nodes. */
static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};
2639
/*
 * Map a probed device to a port index via the DT "serial" alias when
 * present, falling back to the supplied index.  Warns once if aliased
 * and non-aliased devices are mixed, since enumeration then becomes
 * order dependent.
 */
static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias = false;
	static bool seen_dev_without_alias = false;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
			dev_warn(dev, "requested serial port %d  not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}
2671
2672
/*
 * Remove a port from the amba_ports table, tear down its DMA, and drop
 * the uart driver registration once the last port is gone.
 */
static void pl011_unregister_port(struct uart_amba_port *uap)
{
	int i;
	bool busy = false;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;
		else if (amba_ports[i])
			busy = true;
	}
	pl011_dma_remove(uap);
	if (!busy)
		uart_unregister_driver(&amba_reg);
}
2688
2689 static int pl011_find_free_port(void)
2690 {
2691 int i;
2692
2693 for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2694 if (amba_ports[i] == NULL)
2695 return i;
2696
2697 return -EBUSY;
2698 }
2699
2700 static int pl011_get_rs485_mode(struct uart_amba_port *uap)
2701 {
2702 struct uart_port *port = &uap->port;
2703 int ret;
2704
2705 ret = uart_get_rs485_mode(port);
2706 if (ret)
2707 return ret;
2708
2709 return 0;
2710 }
2711
/*
 * Port initialisation shared by the AMBA and SBSA probe paths: map the
 * register window, resolve the port number (honouring a DT "serial"
 * alias, if any), fill in the generic uart_port fields, read the RS485
 * mode from firmware, and claim the slot in amba_ports[].
 *
 * Returns 0 on success or a negative errno.
 */
static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
			    struct resource *mmiobase, int index)
{
	void __iomem *base;
	int ret;

	base = devm_ioremap_resource(dev, mmiobase);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* A DT "serial" alias may override the probe-order index */
	index = pl011_probe_dt_alias(index, dev);

	uap->port.dev = dev;
	uap->port.mapbase = mmiobase->start;
	uap->port.membase = base;
	uap->port.fifosize = uap->fifosize;
	uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE);
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = index;

	ret = pl011_get_rs485_mode(uap);
	if (ret)
		return ret;

	amba_ports[index] = uap;

	return 0;
}
2740
/*
 * Final registration step: quiesce the hardware, lazily register the
 * shared uart_driver on first use, then hand the port to the serial
 * core.
 *
 * Returns 0 on success or a negative errno; on failure the port's
 * amba_ports[] slot (claimed in pl011_setup_port()) is released again.
 */
static int pl011_register_port(struct uart_amba_port *uap)
{
	int ret, i;

	/* Mask and clear all interrupts before the port goes live */
	pl011_write(0, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	if (!amba_reg.state) {
		ret = uart_register_driver(&amba_reg);
		if (ret < 0) {
			dev_err(uap->port.dev,
				"Failed to register AMBA-PL011 driver\n");
			/* Roll back the slot claimed in pl011_setup_port() */
			for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
				if (amba_ports[i] == uap)
					amba_ports[i] = NULL;
			return ret;
		}
	}

	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret)
		pl011_unregister_port(uap);

	return ret;
}
2767
/* RS485 capabilities this driver accepts via pl011_rs485_config() */
static const struct serial_rs485 pl011_rs485_supported = {
	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
		 SER_RS485_RX_DURING_TX,
	.delay_rts_before_send = 1,
	.delay_rts_after_send = 1,
};
2774
/*
 * AMBA bus probe: bind a PL011 (or vendor variant selected by
 * @id->data), allocate per-port state, acquire the clock, fill in the
 * vendor-specific parameters and register the port.
 *
 * Returns 0 on success or a negative errno.  All allocations are
 * devm-managed, so error paths need no explicit cleanup.
 */
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	int portnr, ret;

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->reg_offset = vendor->reg_offset;
	uap->vendor = vendor;
	uap->fifosize = vendor->get_fifosize(dev);
	/* Some variants require 32-bit register accesses */
	uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.ops = &amba_pl011_pops;
	uap->port.rs485_config = pl011_rs485_config;
	uap->port.rs485_supported = pl011_rs485_supported;
	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
	if (ret)
		return ret;

	amba_set_drvdata(dev, uap);

	return pl011_register_port(uap);
}
2812
/* AMBA unbind: detach the port from the serial core and free its slot */
static void pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
}
2820
#ifdef CONFIG_PM_SLEEP
/*
 * System-suspend hook: delegate to the serial core.  Returns -EINVAL
 * if no port has been bound to this device.
 */
static int pl011_suspend(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

/* System-resume hook: let the serial core restart the port */
static int pl011_resume(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

/* PM ops shared by the AMBA and SBSA platform drivers below */
static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
2844
/*
 * Probe an SBSA-compliant UART described via DT or ACPI.  The baud
 * rate is fixed by firmware: DT supplies it through "current-speed",
 * while the ACPI path falls back to 115200.
 *
 * Returns 0 on success or a negative errno.
 */
static int sbsa_uart_probe(struct platform_device *pdev)
{
	struct uart_amba_port *uap;
	struct resource *r;
	int portnr, ret;
	int baudrate;

	/*
	 * The port cannot reprogram its own baud rate, so a DT node
	 * must state the firmware-configured speed explicitly.
	 */
	if (pdev->dev.of_node) {
		struct device_node *np = pdev->dev.of_node;

		ret = of_property_read_u32(np, "current-speed", &baudrate);
		if (ret)
			return ret;
	} else {
		baudrate = 115200;
	}

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	uap->port.irq = ret;

#ifdef CONFIG_ACPI_SPCR_TABLE
	/* QDF2400 erratum 44 needs a dedicated vendor_data (see SPCR) */
	if (qdf2400_e44_present) {
		dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
		uap->vendor = &vendor_qdt_qdf2400_e44;
	} else
#endif
		uap->vendor = &vendor_sbsa;

	uap->reg_offset = uap->vendor->reg_offset;
	uap->fifosize = 32;
	uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.ops = &sbsa_uart_pops;
	uap->fixed_baud = baudrate;

	snprintf(uap->type, sizeof(uap->type), "SBSA");

	/* A NULL resource is caught by devm_ioremap_resource() below */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, uap);

	return pl011_register_port(uap);
}
2906
/* Platform unbind: detach the port and release its amba_ports[] slot */
static int sbsa_uart_remove(struct platform_device *pdev)
{
	struct uart_amba_port *uap = platform_get_drvdata(pdev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}
2915
/* Device-tree match table for generic SBSA UART nodes */
static const struct of_device_id sbsa_uart_of_match[] = {
	{ .compatible = "arm,sbsa-uart", },
	{},
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
2921
/* ACPI IDs for SBSA-compliant UART devices */
static const struct acpi_device_id __maybe_unused sbsa_uart_acpi_match[] = {
	{ "ARMH0011", 0 },
	{ "ARMHB000", 0 },
	{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
2928
/* Platform driver for SBSA UARTs (matched via DT or ACPI, not AMBA) */
static struct platform_driver arm_sbsa_uart_platform_driver = {
	.probe = sbsa_uart_probe,
	.remove = sbsa_uart_remove,
	.driver = {
		.name = "sbsa-uart",
		.pm = &pl011_dev_pm_ops,
		.of_match_table = of_match_ptr(sbsa_uart_of_match),
		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
		/* disallow manual unbind when the driver is built in */
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
};
2940
/*
 * AMBA peripheral IDs this driver binds to: the ARM PL011 and the
 * ST-Ericsson derivative, each with its own vendor_data.  The all-zero
 * entry terminates the table.
 */
static const struct amba_id pl011_ids[] = {
	{
		.id = 0x00041011,
		.mask = 0x000fffff,
		.data = &vendor_arm,
	},
	{
		.id = 0x00380802,
		.mask = 0x00ffffff,
		.data = &vendor_st,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);
2956
/* AMBA bus driver for PL011-compatible UARTs listed in pl011_ids[] */
static struct amba_driver pl011_driver = {
	.drv = {
		.name = "uart-pl011",
		.pm = &pl011_dev_pm_ops,
		/* disallow manual unbind when the driver is built in */
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
	.id_table = pl011_ids,
	.probe = pl011_probe,
	.remove = pl011_remove,
};
2967
2968 static int __init pl011_init(void)
2969 {
2970 printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
2971
2972 if (platform_driver_register(&arm_sbsa_uart_platform_driver))
2973 pr_warn("could not register SBSA UART platform driver\n");
2974 return amba_driver_register(&pl011_driver);
2975 }
2976
/* Module exit: unregister both drivers in reverse order of pl011_init() */
static void __exit pl011_exit(void)
{
	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
	amba_driver_unregister(&pl011_driver);
}
2982
2983
2984
2985
2986
/*
 * NOTE(review): registered at arch_initcall level (earlier than
 * device_initcall) — presumably so a PL011 console is usable early in
 * boot; confirm against the console registration requirements.
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");