/*
 * Driver for Broadcom BCM2835 SPI Controllers
 */
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h> /* FIXME: using chip internals */
#include <linux/gpio/driver.h> /* FIXME: using chip internals */
#include <linux/of_irq.h>
#include <linux/spi/spi.h>
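
/* SPI register offsets */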
#define BCM2835_SPI_CS			0x00
#define BCM2835_SPI_FIFO		0x04
#define BCM2835_SPI_CLK			0x08
#define BCM2835_SPI_DLEN		0x0c
#define BCM2835_SPI_LTOH		0x10
#define BCM2835_SPI_DC			0x14
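
/* Bitfields in CS */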
#define BCM2835_SPI_CS_LEN_LONG		0x02000000
#define BCM2835_SPI_CS_DMA_LEN		0x01000000
#define BCM2835_SPI_CS_CSPOL2		0x00800000
#define BCM2835_SPI_CS_CSPOL1		0x00400000
#define BCM2835_SPI_CS_CSPOL0		0x00200000
#define BCM2835_SPI_CS_RXF		0x00100000
#define BCM2835_SPI_CS_RXR		0x00080000
#define BCM2835_SPI_CS_TXD		0x00040000
#define BCM2835_SPI_CS_RXD		0x00020000
#define BCM2835_SPI_CS_DONE		0x00010000
#define BCM2835_SPI_CS_LEN		0x00002000
#define BCM2835_SPI_CS_REN		0x00001000
#define BCM2835_SPI_CS_ADCS		0x00000800
#define BCM2835_SPI_CS_INTR		0x00000400
#define BCM2835_SPI_CS_INTD		0x00000200
#define BCM2835_SPI_CS_DMAEN		0x00000100
#define BCM2835_SPI_CS_TA		0x00000080
#define BCM2835_SPI_CS_CSPOL		0x00000040
#define BCM2835_SPI_CS_CLEAR_RX		0x00000020
#define BCM2835_SPI_CS_CLEAR_TX		0x00000010
#define BCM2835_SPI_CS_CPOL		0x00000008
#define BCM2835_SPI_CS_CPHA		0x00000004
#define BCM2835_SPI_CS_CS_10		0x00000002
#define BCM2835_SPI_CS_CS_01		0x00000001

#define BCM2835_SPI_FIFO_SIZE		64
#define BCM2835_SPI_FIFO_SIZE_3_4	48
#define BCM2835_SPI_DMA_MIN_LENGTH	96
#define BCM2835_SPI_MODE_BITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
				| SPI_NO_CS | SPI_3WIRE)

#define DRV_NAME	"spi-bcm2835"
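
/* time limit (in microseconds) for running a transfer in polling mode */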
static unsigned int polling_limit_us = 30;
module_param(polling_limit_us, uint, 0664);
MODULE_PARM_DESC(polling_limit_us,
		 "time in us to run a transfer in polling mode\n");
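
/*
 * struct bcm2835_spi - driver-private state for one controller instance
 *
 * Holds the register mapping, clock, IRQ and a pointer back to the SPI
 * controller, plus bookkeeping for the transfer currently in flight:
 * TX/RX buffers and remaining byte counts, the PIO "prologue" used to
 * realign scatterlist entries for DMA, debugfs transfer counters, and
 * the DMA descriptor/address used to feed zeroes on RX-only transfers.
 */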
struct bcm2835_spi {
	void __iomem *regs;
	struct clk *clk;
	unsigned long clk_hz;
	int irq;
	struct spi_transfer *tfr;
	struct spi_controller *ctlr;
	const u8 *tx_buf;
	u8 *rx_buf;
	int tx_len;
	int rx_len;
	int tx_prologue;
	int rx_prologue;
	unsigned int tx_spillover;

	struct dentry *debugfs_dir;
	u64 count_transfer_polling;
	u64 count_transfer_irq;
	u64 count_transfer_irq_after_polling;
	u64 count_transfer_dma;

	struct bcm2835_spidev *slv;
	unsigned int tx_dma_active;
	unsigned int rx_dma_active;
	struct dma_async_tx_descriptor *fill_tx_desc;
	dma_addr_t fill_tx_addr;
};
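
/*
 * struct bcm2835_spidev - per-slave state set up in bcm2835_spi_setup()
 *
 * Caches the CS register value used to prepare transfers for this device
 * and the reusable DMA descriptor (plus its mapped address) that clears
 * the RX FIFO on TX-only DMA transfers.  clear_rx_cs is kept cacheline
 * aligned so the word read by the DMA engine does not share a cacheline
 * with CPU-written members.
 */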
struct bcm2835_spidev {
	u32 prepare_cs;
	struct dma_async_tx_descriptor *clear_rx_desc;
	dma_addr_t clear_rx_addr;
	u32 clear_rx_cs ____cacheline_aligned;
};

#if defined(CONFIG_DEBUG_FS)
static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
				   const char *dname)
{
	char name[64];
	struct dentry *dir;

	snprintf(name, sizeof(name), "spi-bcm2835-%s", dname);

	dir = debugfs_create_dir(name, NULL);
	bs->debugfs_dir = dir;

	debugfs_create_u64("count_transfer_polling", 0444, dir,
			   &bs->count_transfer_polling);
	debugfs_create_u64("count_transfer_irq", 0444, dir,
			   &bs->count_transfer_irq);
	debugfs_create_u64("count_transfer_irq_after_polling", 0444, dir,
			   &bs->count_transfer_irq_after_polling);
	debugfs_create_u64("count_transfer_dma", 0444, dir,
			   &bs->count_transfer_dma);
}

static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
{
	debugfs_remove_recursive(bs->debugfs_dir);
	bs->debugfs_dir = NULL;
}
#else
static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
				   const char *dname)
{
}

static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
{
}
#endif

static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned int reg)
{
	return readl(bs->regs + reg);
}

static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned int reg, u32 val)
{
	writel(val, bs->regs + reg);
}

static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs)
{
	u8 byte;

	while ((bs->rx_len) &&
	       (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
		byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);
		if (bs->rx_buf)
			*bs->rx_buf++ = byte;
		bs->rx_len--;
	}
}

static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs)
{
	u8 byte;

	while ((bs->tx_len) &&
	       (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
		byte = bs->tx_buf ? *bs->tx_buf++ : 0;
		bcm2835_wr(bs, BCM2835_SPI_FIFO, byte);
		bs->tx_len--;
	}
}
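
/*
 * bcm2835_rd_fifo_count() - blindly read exactly @count bytes from RX FIFO
 *
 * The caller must ensure that @count bytes are available in the FIFO and
 * that bs->rx_buf is non-NULL; it is only used for the PIO prologue of
 * DMA transfers, where both conditions hold.
 */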
static inline void bcm2835_rd_fifo_count(struct bcm2835_spi *bs, int count)
{
	u32 val;
	int len;

	bs->rx_len -= count;

	do {
		val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
		len = min(count, 4);
		memcpy(bs->rx_buf, &val, len);
		bs->rx_buf += len;
		count -= 4;
	} while (count > 0);
}
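
/*
 * bcm2835_wr_fifo_count() - blindly write exactly @count bytes to TX FIFO
 *
 * The caller must ensure the FIFO can accommodate @count bytes.  Data is
 * taken from bs->tx_buf in 32-bit chunks, or zeroes are written if there
 * is no TX buffer.
 */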
static inline void bcm2835_wr_fifo_count(struct bcm2835_spi *bs, int count)
{
	u32 val;
	int len;

	bs->tx_len -= count;

	do {
		if (bs->tx_buf) {
			len = min(count, 4);
			memcpy(&val, bs->tx_buf, len);
			bs->tx_buf += len;
		} else {
			val = 0;
		}
		bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
		count -= 4;
	} while (count > 0);
}
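
/*
 * bcm2835_wait_tx_fifo_empty() - busy-wait for the TX FIFO to drain
 *
 * Polls the DONE flag; only used for the short PIO sections of a transfer,
 * where at most a FIFO's worth of data is outstanding.
 */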
static inline void bcm2835_wait_tx_fifo_empty(struct bcm2835_spi *bs)
{
	while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
		cpu_relax();
}
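
/*
 * bcm2835_rd_fifo_blind() - read up to @count bytes from the RX FIFO
 * without checking the RXD flag; the caller guarantees the data is there.
 */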
static inline void bcm2835_rd_fifo_blind(struct bcm2835_spi *bs, int count)
{
	u8 val;

	count = min(count, bs->rx_len);
	bs->rx_len -= count;

	do {
		val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
		if (bs->rx_buf)
			*bs->rx_buf++ = val;
	} while (--count);
}
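
/*
 * bcm2835_wr_fifo_blind() - write up to @count bytes to the TX FIFO
 * without checking the TXD flag; the caller guarantees there is room.
 */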
static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
{
	u8 val;

	count = min(count, bs->tx_len);
	bs->tx_len -= count;

	do {
		val = bs->tx_buf ? *bs->tx_buf++ : 0;
		bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
	} while (--count);
}

static void bcm2835_spi_reset_hw(struct bcm2835_spi *bs)
{
	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);

	cs &= ~(BCM2835_SPI_CS_INTR |
		BCM2835_SPI_CS_INTD |
		BCM2835_SPI_CS_DMAEN |
		BCM2835_SPI_CS_TA);

	cs |= BCM2835_SPI_CS_DONE;
	cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;

	bcm2835_wr(bs, BCM2835_SPI_CS, cs);
	bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
}

static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
{
	struct bcm2835_spi *bs = dev_id;
	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);

	if (!(cs & BCM2835_SPI_CS_INTR))
		return IRQ_NONE;

	if (cs & BCM2835_SPI_CS_RXF)
		bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
	else if (cs & BCM2835_SPI_CS_RXR)
		bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE_3_4);

	if (bs->tx_len && cs & BCM2835_SPI_CS_DONE)
		bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);

	bcm2835_rd_fifo(bs);
	bcm2835_wr_fifo(bs);

	if (!bs->rx_len) {
		bcm2835_spi_reset_hw(bs);
		spi_finalize_current_transfer(bs->ctlr);
	}

	return IRQ_HANDLED;
}

static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr,
					struct spi_device *spi,
					struct spi_transfer *tfr,
					u32 cs, bool fifo_empty)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	bs->count_transfer_irq++;

	bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);

	if (fifo_empty)
		bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
	bcm2835_wr_fifo(bs);

	cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
	bcm2835_wr(bs, BCM2835_SPI_CS, cs);

	return 1;
}
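
/*
 * bcm2835_spi_transfer_prologue() - transfer the first few bytes by PIO
 *
 * The DMA engine moves data to and from the FIFO in 4-byte words, so the
 * DMA portion of the first RX scatterlist entry (and, to keep TX and RX
 * in lockstep, of the first TX entry) must be a multiple of 4 bytes long.
 * Any remainder of 1-3 bytes (plus up to 4 spilled-over TX bytes) is
 * transferred here by PIO first and the sg entries are adjusted to skip
 * it; bcm2835_spi_undo_prologue() reverts that adjustment afterwards.
 */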
static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
					  struct spi_transfer *tfr,
					  struct bcm2835_spi *bs,
					  u32 cs)
{
	int tx_remaining;

	bs->tfr = tfr;
	bs->tx_prologue = 0;
	bs->rx_prologue = 0;
	bs->tx_spillover = false;

	if (bs->tx_buf && !sg_is_last(&tfr->tx_sg.sgl[0]))
		bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;

	if (bs->rx_buf && !sg_is_last(&tfr->rx_sg.sgl[0])) {
		bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;

		if (bs->rx_prologue > bs->tx_prologue) {
			if (!bs->tx_buf || sg_is_last(&tfr->tx_sg.sgl[0])) {
				bs->tx_prologue = bs->rx_prologue;
			} else {
				bs->tx_prologue += 4;
				bs->tx_spillover =
					!(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3);
			}
		}
	}

	if (!bs->tx_prologue)
		return;

	if (bs->rx_prologue) {
		bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
						  | BCM2835_SPI_CS_DMAEN);
		bcm2835_wr_fifo_count(bs, bs->rx_prologue);
		bcm2835_wait_tx_fifo_empty(bs);
		bcm2835_rd_fifo_count(bs, bs->rx_prologue);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_RX
						  | BCM2835_SPI_CS_CLEAR_TX
						  | BCM2835_SPI_CS_DONE);

		dma_sync_single_for_device(ctlr->dma_rx->device->dev,
					   sg_dma_address(&tfr->rx_sg.sgl[0]),
					   bs->rx_prologue, DMA_FROM_DEVICE);

		sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
		sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
	}

	if (!bs->tx_buf)
		return;

	tx_remaining = bs->tx_prologue - bs->rx_prologue;
	if (tx_remaining) {
		bcm2835_wr(bs, BCM2835_SPI_DLEN, tx_remaining);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
						  | BCM2835_SPI_CS_DMAEN);
		bcm2835_wr_fifo_count(bs, tx_remaining);
		bcm2835_wait_tx_fifo_empty(bs);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX
						  | BCM2835_SPI_CS_DONE);
	}

	if (likely(!bs->tx_spillover)) {
		sg_dma_address(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
		sg_dma_len(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
	} else {
		sg_dma_len(&tfr->tx_sg.sgl[0]) = 0;
		sg_dma_address(&tfr->tx_sg.sgl[1]) += 4;
		sg_dma_len(&tfr->tx_sg.sgl[1]) -= 4;
	}
}
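
/*
 * bcm2835_spi_undo_prologue() - reconstruct the original scatterlist
 *
 * Undoes the address/length adjustments made by
 * bcm2835_spi_transfer_prologue() so the transfer can be unmapped cleanly.
 */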
static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
{
	struct spi_transfer *tfr = bs->tfr;

	if (!bs->tx_prologue)
		return;

	if (bs->rx_prologue) {
		sg_dma_address(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
		sg_dma_len(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
	}

	if (!bs->tx_buf)
		goto out;

	if (likely(!bs->tx_spillover)) {
		sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
		sg_dma_len(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
	} else {
		sg_dma_len(&tfr->tx_sg.sgl[0]) = bs->tx_prologue - 4;
		sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4;
		sg_dma_len(&tfr->tx_sg.sgl[1]) += 4;
	}
out:
	bs->tx_prologue = 0;
}
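
/*
 * bcm2835_spi_dma_rx_done() - callback for the DMA RX channel
 *
 * Used for bidirectional and RX-only transfers; RX completion implies that
 * TX has finished as well, so the TX channel is simply terminated.
 */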
static void bcm2835_spi_dma_rx_done(void *data)
{
	struct spi_controller *ctlr = data;
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	/*
	 * Terminate the TX DMA channel: there is no separate TX interrupt
	 * here, and by the time RX DMA completes TX must already be done.
	 */
	dmaengine_terminate_async(ctlr->dma_tx);
	bs->tx_dma_active = false;
	bs->rx_dma_active = false;
	bcm2835_spi_undo_prologue(bs);

	/* reset FIFOs and HW, then wake up the framework */
	bcm2835_spi_reset_hw(bs);
	spi_finalize_current_transfer(ctlr);
}
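
/*
 * bcm2835_spi_dma_tx_done() - callback for the DMA TX channel on TX-only
 * transfers
 *
 * Busy-waits for the TX FIFO to drain while repeatedly clearing the RX
 * FIFO, then stops the cyclic RX-clearing DMA and finalizes the transfer.
 */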
static void bcm2835_spi_dma_tx_done(void *data)
{
	struct spi_controller *ctlr = data;
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
		bcm2835_wr(bs, BCM2835_SPI_CS, bs->slv->clear_rx_cs);

	bs->tx_dma_active = false;
	smp_wmb();

	if (cmpxchg(&bs->rx_dma_active, true, false))
		dmaengine_terminate_async(ctlr->dma_rx);

	bcm2835_spi_undo_prologue(bs);
	bcm2835_spi_reset_hw(bs);
	spi_finalize_current_transfer(ctlr);
}
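
/*
 * bcm2835_spi_prepare_sg() - prepare and submit a DMA descriptor for the
 * TX or RX scatterlist of @tfr
 *
 * A completion callback is attached to the RX descriptor, or to the TX
 * descriptor for TX-only transfers, so exactly one side finalizes the
 * transfer.
 */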
static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
				  struct spi_transfer *tfr,
				  struct bcm2835_spi *bs,
				  struct bcm2835_spidev *slv,
				  bool is_tx)
{
	struct dma_chan *chan;
	struct scatterlist *sgl;
	unsigned int nents;
	enum dma_transfer_direction dir;
	unsigned long flags;

	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	if (is_tx) {
		dir = DMA_MEM_TO_DEV;
		chan = ctlr->dma_tx;
		nents = tfr->tx_sg.nents;
		sgl = tfr->tx_sg.sgl;
		flags = tfr->rx_buf ? 0 : DMA_PREP_INTERRUPT;
	} else {
		dir = DMA_DEV_TO_MEM;
		chan = ctlr->dma_rx;
		nents = tfr->rx_sg.nents;
		sgl = tfr->rx_sg.sgl;
		flags = DMA_PREP_INTERRUPT;
	}

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	if (!is_tx) {
		desc->callback = bcm2835_spi_dma_rx_done;
		desc->callback_param = ctlr;
	} else if (!tfr->rx_buf) {
		desc->callback = bcm2835_spi_dma_tx_done;
		desc->callback_param = ctlr;
		bs->slv = slv;
	}

	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}
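
/*
 * bcm2835_spi_transfer_one_dma() - perform an SPI transfer using the DMA
 * engine
 *
 * Transfers the initial unaligned bytes by PIO (the "prologue"), then
 * kicks off TX and RX DMA.  For RX-only transfers the TX FIFO is fed
 * zeroes from a cyclic descriptor over the zero page; for TX-only
 * transfers the RX FIFO is drained by cyclically writing a CS value with
 * the CLEAR_RX bit set.  Returns 1 to tell the core the transfer is still
 * in flight.
 */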
static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
					struct spi_transfer *tfr,
					struct bcm2835_spidev *slv,
					u32 cs)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	dma_cookie_t cookie;
	int ret;

	bs->count_transfer_dma++;

	bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);

	if (bs->tx_buf) {
		ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, true);
	} else {
		cookie = dmaengine_submit(bs->fill_tx_desc);
		ret = dma_submit_error(cookie);
	}
	if (ret)
		goto err_reset_hw;

	bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len);

	bcm2835_wr(bs, BCM2835_SPI_CS,
		   cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);

	bs->tx_dma_active = true;
	smp_wmb();

	dma_async_issue_pending(ctlr->dma_tx);

	if (bs->rx_buf) {
		ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, false);
	} else {
		cookie = dmaengine_submit(slv->clear_rx_desc);
		ret = dma_submit_error(cookie);
	}
	if (ret) {
		dmaengine_terminate_sync(ctlr->dma_tx);
		bs->tx_dma_active = false;
		goto err_reset_hw;
	}

	dma_async_issue_pending(ctlr->dma_rx);
	bs->rx_dma_active = true;
	smp_mb();

	if (!bs->rx_buf && !bs->tx_dma_active &&
	    cmpxchg(&bs->rx_dma_active, true, false)) {
		dmaengine_terminate_async(ctlr->dma_rx);
		bcm2835_spi_reset_hw(bs);
	}

	return 1;

err_reset_hw:
	bcm2835_spi_reset_hw(bs);
	bcm2835_spi_undo_prologue(bs);
	return ret;
}

static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *tfr)
{
	if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
		return false;

	return true;
}

static void bcm2835_dma_release(struct spi_controller *ctlr,
				struct bcm2835_spi *bs)
{
	if (ctlr->dma_tx) {
		dmaengine_terminate_sync(ctlr->dma_tx);

		if (bs->fill_tx_desc)
			dmaengine_desc_free(bs->fill_tx_desc);

		if (bs->fill_tx_addr)
			dma_unmap_page_attrs(ctlr->dma_tx->device->dev,
					     bs->fill_tx_addr, sizeof(u32),
					     DMA_TO_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);

		dma_release_channel(ctlr->dma_tx);
		ctlr->dma_tx = NULL;
	}

	if (ctlr->dma_rx) {
		dmaengine_terminate_sync(ctlr->dma_rx);
		dma_release_channel(ctlr->dma_rx);
		ctlr->dma_rx = NULL;
	}
}
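
/*
 * bcm2835_dma_init() - set up the TX and RX DMA channels
 *
 * On any failure other than probe deferral the driver silently falls back
 * to interrupt/polling mode; DMA is only advertised via ctlr->can_dma once
 * everything is configured.
 */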
static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
			    struct bcm2835_spi *bs)
{
	struct dma_slave_config slave_config;
	const __be32 *addr;
	dma_addr_t dma_reg_base;
	int ret;

	addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
	if (!addr) {
		dev_err(dev, "could not get DMA-register address - not using dma mode\n");
		return 0;
	}
	dma_reg_base = be32_to_cpup(addr);

	ctlr->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(ctlr->dma_tx)) {
		dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
		ret = PTR_ERR(ctlr->dma_tx);
		ctlr->dma_tx = NULL;
		goto err;
	}
	ctlr->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(ctlr->dma_rx)) {
		dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
		ret = PTR_ERR(ctlr->dma_rx);
		ctlr->dma_rx = NULL;
		goto err_release;
	}

	slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config);
	if (ret)
		goto err_config;

	bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev,
					      ZERO_PAGE(0), 0, sizeof(u32),
					      DMA_TO_DEVICE,
					      DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) {
		dev_err(dev, "cannot map zero page - not using DMA mode\n");
		bs->fill_tx_addr = 0;
		ret = -ENOMEM;
		goto err_release;
	}

	bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx,
						     bs->fill_tx_addr,
						     sizeof(u32), 0,
						     DMA_MEM_TO_DEV, 0);
	if (!bs->fill_tx_desc) {
		dev_err(dev, "cannot prepare fill_tx_desc - not using DMA mode\n");
		ret = -ENOMEM;
		goto err_release;
	}

	ret = dmaengine_desc_set_reuse(bs->fill_tx_desc);
	if (ret) {
		dev_err(dev, "cannot reuse fill_tx_desc - not using DMA mode\n");
		goto err_release;
	}

	slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_CS);
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
	if (ret)
		goto err_config;

	ctlr->can_dma = bcm2835_spi_can_dma;

	return 0;

err_config:
	dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
		ret);
err_release:
	bcm2835_dma_release(ctlr, bs);
err:
	if (ret != -EPROBE_DEFER)
		ret = 0;

	return ret;
}
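
/*
 * bcm2835_spi_transfer_one_poll() - run a short transfer entirely by
 * busy-waiting on the FIFOs
 *
 * If the transfer overruns polling_limit_us the remainder is handed over
 * to interrupt mode.
 */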
static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
					 struct spi_device *spi,
					 struct spi_transfer *tfr,
					 u32 cs)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	unsigned long timeout;

	bs->count_transfer_polling++;

	bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);

	bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);

	timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;

	while (bs->rx_len) {
		bcm2835_wr_fifo(bs);
		bcm2835_rd_fifo(bs);

		if (bs->rx_len && time_after(jiffies, timeout)) {
			dev_dbg_ratelimited(&spi->dev,
					    "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
					    jiffies - timeout,
					    bs->tx_len, bs->rx_len);

			bs->count_transfer_irq_after_polling++;

			return bcm2835_spi_transfer_one_irq(ctlr, spi,
							    tfr, cs, false);
		}
	}

	bcm2835_spi_reset_hw(bs);

	return 0;
}
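
/*
 * bcm2835_spi_transfer_one() - program the clock divider and pick a
 * transfer strategy
 *
 * CDIV must be an even number; 0 selects the slowest rate (divider 65536).
 * Short transfers are polled, medium ones are interrupt driven and long
 * ones (>= BCM2835_SPI_DMA_MIN_LENGTH) use DMA when available.
 */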
static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
				    struct spi_device *spi,
				    struct spi_transfer *tfr)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	struct bcm2835_spidev *slv = spi_get_ctldata(spi);
	unsigned long spi_hz, cdiv;
	unsigned long hz_per_byte, byte_limit;
	u32 cs = slv->prepare_cs;

	spi_hz = tfr->speed_hz;

	if (spi_hz >= bs->clk_hz / 2) {
		cdiv = 2;
	} else if (spi_hz) {
		cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz);
		cdiv += (cdiv % 2);

		if (cdiv >= 65536)
			cdiv = 0;
	} else {
		cdiv = 0;
	}
	tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
	bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);

	if (spi->mode & SPI_3WIRE && tfr->rx_buf)
		cs |= BCM2835_SPI_CS_REN;

	bs->tx_buf = tfr->tx_buf;
	bs->rx_buf = tfr->rx_buf;
	bs->tx_len = tfr->len;
	bs->rx_len = tfr->len;

	hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
	byte_limit = hz_per_byte ? tfr->effective_speed_hz / hz_per_byte : 1;

	if (tfr->len < byte_limit)
		return bcm2835_spi_transfer_one_poll(ctlr, spi, tfr, cs);

	if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
		return bcm2835_spi_transfer_one_dma(ctlr, tfr, slv, cs);

	return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
}

static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
				       struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	struct bcm2835_spidev *slv = spi_get_ctldata(spi);
	int ret;

	if (ctlr->can_dma) {
		ret = spi_split_transfers_maxsize(ctlr, msg, 65532,
						  GFP_KERNEL | GFP_DMA);
		if (ret)
			return ret;
	}

	bcm2835_wr(bs, BCM2835_SPI_CS, slv->prepare_cs);

	return 0;
}

static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
				   struct spi_message *msg)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	if (ctlr->dma_tx) {
		dmaengine_terminate_sync(ctlr->dma_tx);
		bs->tx_dma_active = false;
	}
	if (ctlr->dma_rx) {
		dmaengine_terminate_sync(ctlr->dma_rx);
		bs->rx_dma_active = false;
	}
	bcm2835_spi_undo_prologue(bs);

	bcm2835_spi_reset_hw(bs);
}

static int chip_match_name(struct gpio_chip *chip, void *data)
{
	return !strcmp(chip->label, data);
}

static void bcm2835_spi_cleanup(struct spi_device *spi)
{
	struct bcm2835_spidev *slv = spi_get_ctldata(spi);
	struct spi_controller *ctlr = spi->controller;

	if (slv->clear_rx_desc)
		dmaengine_desc_free(slv->clear_rx_desc);

	if (slv->clear_rx_addr)
		dma_unmap_single(ctlr->dma_rx->device->dev,
				 slv->clear_rx_addr,
				 sizeof(u32),
				 DMA_TO_DEVICE);

	kfree(slv);
}

static int bcm2835_spi_setup_dma(struct spi_controller *ctlr,
				 struct spi_device *spi,
				 struct bcm2835_spi *bs,
				 struct bcm2835_spidev *slv)
{
	int ret;

	if (!ctlr->dma_rx)
		return 0;

	slv->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
					    &slv->clear_rx_cs,
					    sizeof(u32),
					    DMA_TO_DEVICE);
	if (dma_mapping_error(ctlr->dma_rx->device->dev, slv->clear_rx_addr)) {
		dev_err(&spi->dev, "cannot map clear_rx_cs\n");
		slv->clear_rx_addr = 0;
		return -ENOMEM;
	}

	slv->clear_rx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
						       slv->clear_rx_addr,
						       sizeof(u32), 0,
						       DMA_MEM_TO_DEV, 0);
	if (!slv->clear_rx_desc) {
		dev_err(&spi->dev, "cannot prepare clear_rx_desc\n");
		return -ENOMEM;
	}

	ret = dmaengine_desc_set_reuse(slv->clear_rx_desc);
	if (ret) {
		dev_err(&spi->dev, "cannot reuse clear_rx_desc\n");
		return ret;
	}

	return 0;
}

static int bcm2835_spi_setup(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	struct bcm2835_spidev *slv = spi_get_ctldata(spi);
	struct gpio_chip *chip;
	int ret;
	u32 cs;

	if (!slv) {
		slv = kzalloc(ALIGN(sizeof(*slv), dma_get_cache_alignment()),
			      GFP_KERNEL);
		if (!slv)
			return -ENOMEM;

		spi_set_ctldata(spi, slv);

		ret = bcm2835_spi_setup_dma(ctlr, spi, bs, slv);
		if (ret)
			goto err_cleanup;
	}

	cs = BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
	if (spi->mode & SPI_CPOL)
		cs |= BCM2835_SPI_CS_CPOL;
	if (spi->mode & SPI_CPHA)
		cs |= BCM2835_SPI_CS_CPHA;
	slv->prepare_cs = cs;

	if (ctlr->dma_rx) {
		slv->clear_rx_cs = cs | BCM2835_SPI_CS_TA |
				   BCM2835_SPI_CS_DMAEN |
				   BCM2835_SPI_CS_CLEAR_RX;
		dma_sync_single_for_device(ctlr->dma_rx->device->dev,
					   slv->clear_rx_addr,
					   sizeof(u32),
					   DMA_TO_DEVICE);
	}

	if (spi->mode & SPI_NO_CS)
		return 0;

	if (spi->cs_gpiod)
		return 0;
	if (spi->chip_select > 1) {
		dev_err(&spi->dev,
			"setup: only two native chip-selects are supported\n");
		ret = -EINVAL;
		goto err_cleanup;
	}

	chip = gpiochip_find("pinctrl-bcm2835", chip_match_name);
	if (!chip)
		return 0;

	spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select,
						  DRV_NAME,
						  GPIO_LOOKUP_FLAGS_DEFAULT,
						  GPIOD_OUT_LOW);
	if (IS_ERR(spi->cs_gpiod)) {
		ret = PTR_ERR(spi->cs_gpiod);
		goto err_cleanup;
	}

	dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n",
		 spi->chip_select);

	return 0;

err_cleanup:
	bcm2835_spi_cleanup(spi);
	return ret;
}

static int bcm2835_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct bcm2835_spi *bs;
	int err;

	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*bs));
	if (!ctlr)
		return -ENOMEM;

	platform_set_drvdata(pdev, ctlr);

	ctlr->use_gpio_descriptors = true;
	ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->num_chipselect = 3;
	ctlr->setup = bcm2835_spi_setup;
	ctlr->cleanup = bcm2835_spi_cleanup;
	ctlr->transfer_one = bcm2835_spi_transfer_one;
	ctlr->handle_err = bcm2835_spi_handle_err;
	ctlr->prepare_message = bcm2835_spi_prepare_message;
	ctlr->dev.of_node = pdev->dev.of_node;

	bs = spi_controller_get_devdata(ctlr);
	bs->ctlr = ctlr;

	bs->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(bs->regs))
		return PTR_ERR(bs->regs);

	bs->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(bs->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(bs->clk),
				     "could not get clk\n");

	ctlr->max_speed_hz = clk_get_rate(bs->clk) / 2;

	bs->irq = platform_get_irq(pdev, 0);
	if (bs->irq <= 0)
		return bs->irq ? bs->irq : -ENODEV;

	clk_prepare_enable(bs->clk);
	bs->clk_hz = clk_get_rate(bs->clk);

	err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
	if (err)
		goto out_clk_disable;

	bcm2835_wr(bs, BCM2835_SPI_CS,
		   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);

	err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt,
			       IRQF_SHARED, dev_name(&pdev->dev), bs);
	if (err) {
		dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
		goto out_dma_release;
	}

	err = spi_register_controller(ctlr);
	if (err) {
		dev_err(&pdev->dev, "could not register SPI controller: %d\n",
			err);
		goto out_dma_release;
	}

	bcm2835_debugfs_create(bs, dev_name(&pdev->dev));

	return 0;

out_dma_release:
	bcm2835_dma_release(ctlr, bs);
out_clk_disable:
	clk_disable_unprepare(bs->clk);
	return err;
}

static int bcm2835_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	bcm2835_debugfs_remove(bs);

	spi_unregister_controller(ctlr);

	bcm2835_dma_release(ctlr, bs);

	bcm2835_wr(bs, BCM2835_SPI_CS,
		   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);

	clk_disable_unprepare(bs->clk);

	return 0;
}

static void bcm2835_spi_shutdown(struct platform_device *pdev)
{
	int ret;

	ret = bcm2835_spi_remove(pdev);
	if (ret)
		dev_err(&pdev->dev, "failed to shutdown\n");
}

static const struct of_device_id bcm2835_spi_match[] = {
	{ .compatible = "brcm,bcm2835-spi", },
	{}
};
MODULE_DEVICE_TABLE(of, bcm2835_spi_match);

static struct platform_driver bcm2835_spi_driver = {
	.driver		= {
		.name		= DRV_NAME,
		.of_match_table	= bcm2835_spi_match,
	},
	.probe		= bcm2835_spi_probe,
	.remove		= bcm2835_spi_remove,
	.shutdown	= bcm2835_spi_shutdown,
};
module_platform_driver(bcm2835_spi_driver);

MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835");
MODULE_AUTHOR("Chris Boot <bootc@bootc.net>");
MODULE_LICENSE("GPL");