0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016 #include <linux/init.h>
0017 #include <linux/module.h>
0018 #include <linux/device.h>
0019 #include <linux/ioport.h>
0020 #include <linux/errno.h>
0021 #include <linux/interrupt.h>
0022 #include <linux/spi/spi.h>
0023 #include <linux/delay.h>
0024 #include <linux/clk.h>
0025 #include <linux/err.h>
0026 #include <linux/amba/bus.h>
0027 #include <linux/amba/pl022.h>
0028 #include <linux/io.h>
0029 #include <linux/slab.h>
0030 #include <linux/dmaengine.h>
0031 #include <linux/dma-mapping.h>
0032 #include <linux/scatterlist.h>
0033 #include <linux/pm_runtime.h>
0034 #include <linux/of.h>
0035 #include <linux/pinctrl/consumer.h>
0036
0037
0038
0039
0040
0041
/*
 * SSP_WRITE_BITS - read-modify-write a bit field in a cached register value:
 * clears the bits selected by @mask in @reg, then ORs in @val shifted up to
 * start bit @sb (masked so @val cannot spill outside the field).
 */
#define SSP_WRITE_BITS(reg, val, mask, sb) \
 ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
0044
0045
0046
0047
0048
0049
/*
 * GEN_MASK_BITS - generate a register field: @val shifted to start bit @sb,
 * truncated to the field's @mask. Used to compose default register images.
 */
#define GEN_MASK_BITS(val, mask, sb) \
 (((val)<<(sb)) & (mask))
0052
0053 #define DRIVE_TX 0
0054 #define DO_NOT_DRIVE_TX 1
0055
0056 #define DO_NOT_QUEUE_DMA 0
0057 #define QUEUE_DMA 1
0058
0059 #define RX_TRANSFER 1
0060 #define TX_TRANSFER 2
0061
0062
0063
0064
0065 #define SSP_CR0(r) (r + 0x000)
0066 #define SSP_CR1(r) (r + 0x004)
0067 #define SSP_DR(r) (r + 0x008)
0068 #define SSP_SR(r) (r + 0x00C)
0069 #define SSP_CPSR(r) (r + 0x010)
0070 #define SSP_IMSC(r) (r + 0x014)
0071 #define SSP_RIS(r) (r + 0x018)
0072 #define SSP_MIS(r) (r + 0x01C)
0073 #define SSP_ICR(r) (r + 0x020)
0074 #define SSP_DMACR(r) (r + 0x024)
0075 #define SSP_CSR(r) (r + 0x030)
0076 #define SSP_ITCR(r) (r + 0x080)
0077 #define SSP_ITIP(r) (r + 0x084)
0078 #define SSP_ITOP(r) (r + 0x088)
0079 #define SSP_TDR(r) (r + 0x08C)
0080
0081 #define SSP_PID0(r) (r + 0xFE0)
0082 #define SSP_PID1(r) (r + 0xFE4)
0083 #define SSP_PID2(r) (r + 0xFE8)
0084 #define SSP_PID3(r) (r + 0xFEC)
0085
0086 #define SSP_CID0(r) (r + 0xFF0)
0087 #define SSP_CID1(r) (r + 0xFF4)
0088 #define SSP_CID2(r) (r + 0xFF8)
0089 #define SSP_CID3(r) (r + 0xFFC)
0090
0091
0092
0093
0094 #define SSP_CR0_MASK_DSS (0x0FUL << 0)
0095 #define SSP_CR0_MASK_FRF (0x3UL << 4)
0096 #define SSP_CR0_MASK_SPO (0x1UL << 6)
0097 #define SSP_CR0_MASK_SPH (0x1UL << 7)
0098 #define SSP_CR0_MASK_SCR (0xFFUL << 8)
0099
0100
0101
0102
0103
0104 #define SSP_CR0_MASK_DSS_ST (0x1FUL << 0)
0105 #define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5)
0106 #define SSP_CR0_MASK_CSS_ST (0x1FUL << 16)
0107 #define SSP_CR0_MASK_FRF_ST (0x3UL << 21)
0108
0109
0110
0111
0112 #define SSP_CR1_MASK_LBM (0x1UL << 0)
0113 #define SSP_CR1_MASK_SSE (0x1UL << 1)
0114 #define SSP_CR1_MASK_MS (0x1UL << 2)
0115 #define SSP_CR1_MASK_SOD (0x1UL << 3)
0116
0117
0118
0119
0120
0121 #define SSP_CR1_MASK_RENDN_ST (0x1UL << 4)
0122 #define SSP_CR1_MASK_TENDN_ST (0x1UL << 5)
0123 #define SSP_CR1_MASK_MWAIT_ST (0x1UL << 6)
0124 #define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7)
0125 #define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10)
0126
0127 #define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13)
0128
0129
0130
0131
0132 #define SSP_SR_MASK_TFE (0x1UL << 0)
0133 #define SSP_SR_MASK_TNF (0x1UL << 1)
0134 #define SSP_SR_MASK_RNE (0x1UL << 2)
0135 #define SSP_SR_MASK_RFF (0x1UL << 3)
0136 #define SSP_SR_MASK_BSY (0x1UL << 4)
0137
0138
0139
0140
0141 #define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0)
0142
0143
0144
0145
0146 #define SSP_IMSC_MASK_RORIM (0x1UL << 0)
0147 #define SSP_IMSC_MASK_RTIM (0x1UL << 1)
0148 #define SSP_IMSC_MASK_RXIM (0x1UL << 2)
0149 #define SSP_IMSC_MASK_TXIM (0x1UL << 3)
0150
0151
0152
0153
0154
0155 #define SSP_RIS_MASK_RORRIS (0x1UL << 0)
0156
0157 #define SSP_RIS_MASK_RTRIS (0x1UL << 1)
0158
0159 #define SSP_RIS_MASK_RXRIS (0x1UL << 2)
0160
0161 #define SSP_RIS_MASK_TXRIS (0x1UL << 3)
0162
0163
0164
0165
0166
0167 #define SSP_MIS_MASK_RORMIS (0x1UL << 0)
0168
0169 #define SSP_MIS_MASK_RTMIS (0x1UL << 1)
0170
0171 #define SSP_MIS_MASK_RXMIS (0x1UL << 2)
0172
0173 #define SSP_MIS_MASK_TXMIS (0x1UL << 3)
0174
0175
0176
0177
0178
0179 #define SSP_ICR_MASK_RORIC (0x1UL << 0)
0180
0181 #define SSP_ICR_MASK_RTIC (0x1UL << 1)
0182
0183
0184
0185
0186
0187 #define SSP_DMACR_MASK_RXDMAE (0x1UL << 0)
0188
0189 #define SSP_DMACR_MASK_TXDMAE (0x1UL << 1)
0190
0191
0192
0193
0194
0195 #define SSP_CSR_CSVALUE_MASK (0x1FUL << 0)
0196
0197
0198
0199
0200 #define SSP_ITCR_MASK_ITEN (0x1UL << 0)
0201 #define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1)
0202
0203
0204
0205
0206 #define ITIP_MASK_SSPRXD (0x1UL << 0)
0207 #define ITIP_MASK_SSPFSSIN (0x1UL << 1)
0208 #define ITIP_MASK_SSPCLKIN (0x1UL << 2)
0209 #define ITIP_MASK_RXDMAC (0x1UL << 3)
0210 #define ITIP_MASK_TXDMAC (0x1UL << 4)
0211 #define ITIP_MASK_SSPTXDIN (0x1UL << 5)
0212
0213
0214
0215
0216 #define ITOP_MASK_SSPTXD (0x1UL << 0)
0217 #define ITOP_MASK_SSPFSSOUT (0x1UL << 1)
0218 #define ITOP_MASK_SSPCLKOUT (0x1UL << 2)
0219 #define ITOP_MASK_SSPOEn (0x1UL << 3)
0220 #define ITOP_MASK_SSPCTLOEn (0x1UL << 4)
0221 #define ITOP_MASK_RORINTR (0x1UL << 5)
0222 #define ITOP_MASK_RTINTR (0x1UL << 6)
0223 #define ITOP_MASK_RXINTR (0x1UL << 7)
0224 #define ITOP_MASK_TXINTR (0x1UL << 8)
0225 #define ITOP_MASK_INTR (0x1UL << 9)
0226 #define ITOP_MASK_RXDMABREQ (0x1UL << 10)
0227 #define ITOP_MASK_RXDMASREQ (0x1UL << 11)
0228 #define ITOP_MASK_TXDMABREQ (0x1UL << 12)
0229 #define ITOP_MASK_TXDMASREQ (0x1UL << 13)
0230
0231
0232
0233
0234 #define TDR_MASK_TESTDATA (0xFFFFFFFF)
0235
0236
0237
0238
0239
0240
0241
0242 #define STATE_START ((void *) 0)
0243 #define STATE_RUNNING ((void *) 1)
0244 #define STATE_DONE ((void *) 2)
0245 #define STATE_ERROR ((void *) -1)
0246 #define STATE_TIMEOUT ((void *) -2)
0247
0248
0249
0250
0251 #define SSP_DISABLED (0)
0252 #define SSP_ENABLED (1)
0253
0254
0255
0256
0257 #define SSP_DMA_DISABLED (0)
0258 #define SSP_DMA_ENABLED (1)
0259
0260
0261
0262
0263 #define SSP_DEFAULT_CLKRATE 0x2
0264 #define SSP_DEFAULT_PRESCALE 0x40
0265
0266
0267
0268
0269 #define CPSDVR_MIN 0x02
0270 #define CPSDVR_MAX 0xFE
0271 #define SCR_MIN 0x00
0272 #define SCR_MAX 0xFF
0273
0274
0275
0276
0277 #define DEFAULT_SSP_REG_IMSC 0x0UL
0278 #define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
0279 #define ENABLE_ALL_INTERRUPTS ( \
0280 SSP_IMSC_MASK_RORIM | \
0281 SSP_IMSC_MASK_RTIM | \
0282 SSP_IMSC_MASK_RXIM | \
0283 SSP_IMSC_MASK_TXIM \
0284 )
0285
0286 #define CLEAR_ALL_INTERRUPTS 0x3
0287
0288 #define SPI_POLLING_TIMEOUT 1000
0289
0290
0291
0292
/* RX element width used by readwriter()/DMA setup for the current chip */
enum ssp_reading {
	READING_NULL,	/* discard incoming data (no rx_buf) */
	READING_U8,	/* read 8-bit elements */
	READING_U16,	/* read 16-bit elements */
	READING_U32	/* read 32-bit elements */
};
0299
0300
0301
0302
/* TX element width used by readwriter()/DMA setup for the current chip */
enum ssp_writing {
	WRITING_NULL,	/* send zero padding (no tx_buf) */
	WRITING_U8,	/* write 8-bit elements */
	WRITING_U16,	/* write 16-bit elements */
	WRITING_U32	/* write 32-bit elements */
};
0309
0310
0311
0312
0313
0314
0315
0316
0317
0318
0319
0320
0321
/**
 * struct vendor_data - per-PrimecCell-variant capabilities
 * @fifodepth: depth of the TX/RX FIFOs in elements (bounds the software
 *	fill-level tracking in readwriter())
 * @max_bpw: maximum bits per word supported (assumed; not referenced in
 *	this chunk — verify against setup code)
 * @unidir: variant supports unidirectional mode (assumed; verify usage)
 * @extended_cr: variant has 32-bit-wide CR0/CR1 (ST extension) — selects
 *	writel over writew in restore_state()/load_ssp_default_config()
 * @pl023: variant is a PL023, which uses its own default CR0/CR1 images
 * @loopback: variant supports loopback mode (assumed; verify usage)
 * @internal_cs_ctrl: variant controls chip select internally via the
 *	SSP_CSR register (see internal_cs_control())
 */
struct vendor_data {
	int fifodepth;
	int max_bpw;
	bool unidir;
	bool extended_cr;
	bool pl023;
	bool loopback;
	bool internal_cs_ctrl;
};
0331
0332
0333
0334
0335
0336
0337
0338
0339
0340
0341
0342
0343
0344
0345
0346
0347
0348
0349
0350
0351
0352
0353
0354
0355
0356
0357
0358
0359
0360
0361
0362
0363
0364
0365
0366
/**
 * struct pl022 - per-controller driver runtime state
 * @adev: the AMBA device this driver is bound to
 * @vendor: capabilities of this PrimeCell variant
 * @phybase: physical base address of the registers (used for DMA addresses)
 * @virtbase: ioremapped register base used by all readw/writew accesses
 * @clk: controller functional clock
 * @master: the SPI core controller handle
 * @master_info: platform data (DMA filter/params)
 * @pump_transfers: tasklet driving the per-transfer state machine
 * @cur_msg: message currently being processed (NULL when idle)
 * @cur_transfer: transfer within @cur_msg currently in flight
 * @cur_chip: per-chip register/config data for the active device
 * @next_msg_cs_active: chip select is being held asserted for the next
 *	queued message (set by giveback() when cs_change is not requested
 *	and the next message targets the same device)
 * @tx, @tx_end: running TX buffer cursor and its end pointer
 * @rx, @rx_end: running RX buffer cursor and its end pointer
 * @read: RX element width for the current transfer
 * @write: TX element width for the current transfer
 * @exp_fifo_level: software estimate of the FIFO fill level, used by
 *	readwriter() to avoid overfilling the TX FIFO
 * @rx_lev_trig: RX FIFO watermark level (maps to DMA src burst size)
 * @tx_lev_trig: TX FIFO watermark level (maps to DMA dst burst size)
 * @dma_rx_channel: dmaengine channel for RX (CONFIG_DMA_ENGINE only)
 * @dma_tx_channel: dmaengine channel for TX (CONFIG_DMA_ENGINE only)
 * @sgt_rx: scatter table for the RX DMA mapping
 * @sgt_tx: scatter table for the TX DMA mapping
 * @dummypage: scratch page used when a transfer has no rx_buf/tx_buf
 * @dma_running: a DMA transfer is in flight (terminate before release)
 * @cur_cs: chip-select index for internal CS control (bit in SSP_CSR)
 * @cur_gpiod: GPIO descriptor used for chip select when not internal
 */
struct pl022 {
	struct amba_device *adev;
	struct vendor_data *vendor;
	resource_size_t phybase;
	void __iomem *virtbase;
	struct clk *clk;
	struct spi_master *master;
	struct pl022_ssp_controller *master_info;
	/* Driver message pump */
	struct tasklet_struct pump_transfers;
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct chip_data *cur_chip;
	bool next_msg_cs_active;
	void *tx;
	void *tx_end;
	void *rx;
	void *rx_end;
	enum ssp_reading read;
	enum ssp_writing write;
	u32 exp_fifo_level;
	enum ssp_rx_level_trig rx_lev_trig;
	enum ssp_tx_level_trig tx_lev_trig;
	/* DMA settings */
#ifdef CONFIG_DMA_ENGINE
	struct dma_chan *dma_rx_channel;
	struct dma_chan *dma_tx_channel;
	struct sg_table sgt_rx;
	struct sg_table sgt_tx;
	char *dummypage;
	bool dma_running;
#endif
	int cur_cs;
	struct gpio_desc *cur_gpiod;
};
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418
/**
 * struct chip_data - cached per-SPI-device controller configuration,
 * written back to the hardware by restore_state()
 * @cr0: value for the SSP_CR0 register (32-bit on extended_cr variants)
 * @cr1: value for the SSP_CR1 register
 * @dmacr: value for the SSP_DMACR register
 * @cpsr: value for the SSP_CPSR clock-prescale register
 * @n_bytes: bytes per bus word; transfer lengths must be a multiple of
 *	this (checked in set_up_next_transfer())
 * @enable_dma: use DMA for this device's transfers when possible
 * @read: RX element width for this device
 * @write: TX element width for this device
 * @xfer_type: transfer type (assumed polling/interrupt/DMA selector —
 *	not referenced in this chunk, verify against setup code)
 */
struct chip_data {
	u32 cr0;
	u16 cr1;
	u16 dmacr;
	u16 cpsr;
	u8 n_bytes;
	bool enable_dma;
	enum ssp_reading read;
	enum ssp_writing write;
	int xfer_type;
};
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
0440 static void internal_cs_control(struct pl022 *pl022, u32 command)
0441 {
0442 u32 tmp;
0443
0444 tmp = readw(SSP_CSR(pl022->virtbase));
0445 if (command == SSP_CHIP_SELECT)
0446 tmp &= ~BIT(pl022->cur_cs);
0447 else
0448 tmp |= BIT(pl022->cur_cs);
0449 writew(tmp, SSP_CSR(pl022->virtbase));
0450 }
0451
0452 static void pl022_cs_control(struct pl022 *pl022, u32 command)
0453 {
0454 if (pl022->vendor->internal_cs_ctrl)
0455 internal_cs_control(pl022, command);
0456 else if (pl022->cur_gpiod)
0457
0458
0459
0460
0461
0462
0463
0464
0465 gpiod_set_value(pl022->cur_gpiod, !command);
0466 }
0467
0468
0469
0470
0471
0472
0473
/**
 * giveback - the current spi_message is complete: run the final delay,
 * decide whether chip select stays asserted for the next queued message,
 * disable the SSP block and hand the message back to the SPI core.
 * @pl022: SSP driver private data structure
 *
 * Assumes the caller has already set message->status.
 */
static void giveback(struct pl022 *pl022)
{
	struct spi_transfer *last_transfer;
	pl022->next_msg_cs_active = false;

	last_transfer = list_last_entry(&pl022->cur_msg->transfers,
					struct spi_transfer, transfer_list);

	/* Delay if requested before any change in chip select */
	spi_transfer_delay_exec(last_transfer);

	if (!last_transfer->cs_change) {
		struct spi_message *next_msg;

		/*
		 * cs_change was not requested on the final transfer, so the
		 * chip select may be held asserted across messages — but
		 * only if the next queued message is for the same device.
		 * Peek at the queue to decide.
		 */
		next_msg = spi_get_next_queued_message(pl022->master);

		/* A different device is next: do not hold chip select */
		if (next_msg && next_msg->spi != pl022->cur_msg->spi)
			next_msg = NULL;
		if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
			pl022_cs_control(pl022, SSP_CHIP_DESELECT);
		else
			pl022->next_msg_cs_active = true;

	}

	pl022->cur_msg = NULL;
	pl022->cur_transfer = NULL;
	pl022->cur_chip = NULL;

	/* Disable the SSP block by clearing the SSE enable bit */
	writew((readw(SSP_CR1(pl022->virtbase)) &
		(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));

	spi_finalize_current_message(pl022->master);
}
0528
0529
0530
0531
0532
/**
 * flush - drain the RX FIFO until the controller goes idle
 * @pl022: SSP driver private data structure
 *
 * Repeatedly empties the RX FIFO while the BSY status bit is set, bounded
 * by a crude loops_per_jiffy-derived budget so a stuck BSY bit cannot hang
 * us forever. Resets the software FIFO-level estimate to zero.
 *
 * Returns the remaining loop budget (0 means the busy-wait ran out).
 */
static int flush(struct pl022 *pl022)
{
	unsigned long limit = loops_per_jiffy << 1;

	dev_dbg(&pl022->adev->dev, "flush\n");
	do {
		/* Read and discard everything currently in the RX FIFO */
		while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
			readw(SSP_DR(pl022->virtbase));
	} while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);

	pl022->exp_fifo_level = 0;

	return limit;
}
0547
0548
0549
0550
0551
/**
 * restore_state - write the cached configuration of the current chip back
 * to the controller registers
 * @pl022: SSP driver private data structure
 *
 * ST "extended control register" variants have a 32-bit CR0, hence the
 * writel/writew split. All interrupts are masked and any pending ones
 * cleared at the end.
 */
static void restore_state(struct pl022 *pl022)
{
	struct chip_data *chip = pl022->cur_chip;

	if (pl022->vendor->extended_cr)
		writel(chip->cr0, SSP_CR0(pl022->virtbase));
	else
		writew(chip->cr0, SSP_CR0(pl022->virtbase));
	writew(chip->cr1, SSP_CR1(pl022->virtbase));
	writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
	writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
	writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}
0566
0567
0568
0569
0570 #define DEFAULT_SSP_REG_CR0 ( \
0571 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
0572 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \
0573 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
0574 GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
0575 GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
0576 )
0577
0578
0579 #define DEFAULT_SSP_REG_CR0_ST ( \
0580 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
0581 GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \
0582 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
0583 GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
0584 GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
0585 GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \
0586 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \
0587 )
0588
0589
0590 #define DEFAULT_SSP_REG_CR0_ST_PL023 ( \
0591 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
0592 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
0593 GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
0594 GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
0595 )
0596
0597 #define DEFAULT_SSP_REG_CR1 ( \
0598 GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
0599 GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
0600 GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
0601 GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \
0602 )
0603
0604
0605 #define DEFAULT_SSP_REG_CR1_ST ( \
0606 DEFAULT_SSP_REG_CR1 | \
0607 GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
0608 GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
0609 GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\
0610 GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
0611 GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \
0612 )
0613
0614
0615
0616
0617
0618 #define DEFAULT_SSP_REG_CR1_ST_PL023 ( \
0619 GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
0620 GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
0621 GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
0622 GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
0623 GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
0624 GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
0625 GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \
0626 GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \
0627 )
0628
0629 #define DEFAULT_SSP_REG_CPSR ( \
0630 GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
0631 )
0632
0633 #define DEFAULT_SSP_REG_DMACR (\
0634 GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
0635 GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
0636 )
0637
0638
0639
0640
0641
/**
 * load_ssp_default_config - program safe default register values
 * @pl022: SSP driver private data structure
 *
 * Picks the CR0/CR1 default image matching the variant (PL023, ST extended
 * control registers, or plain ARM PL022), then disables DMA requests,
 * sets the default prescaler, masks all interrupts and clears any that
 * were pending.
 */
static void load_ssp_default_config(struct pl022 *pl022)
{
	if (pl022->vendor->pl023) {
		/* PL023: 32-bit CR0 image of its own */
		writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase));
		writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase));
	} else if (pl022->vendor->extended_cr) {
		/* ST variant: extended (32-bit) CR0 */
		writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase));
		writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase));
	} else {
		/* Plain ARM PL022 */
		writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
		writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
	}
	writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
	writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
	writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}
0659
0660
0661
0662
0663
/**
 * readwriter - PIO work horse: moves data between the transfer buffers
 * and the TX/RX FIFOs
 * @pl022: SSP driver private data structure
 *
 * TX is only pushed while the software fill estimate (exp_fifo_level)
 * stays below the variant's FIFO depth; RX is drained whenever the RNE
 * status flag reports data, including from an inner loop inside the TX
 * fill loop, so the receive FIFO cannot overrun while we are still
 * transmitting.
 */
static void readwriter(struct pl022 *pl022)
{
	dev_dbg(&pl022->adev->dev,
		"%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
		__func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);

	/* Read as much as we can from the RX FIFO */
	while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
	       && (pl022->rx < pl022->rx_end)) {
		switch (pl022->read) {
		case READING_NULL:
			/* No rx_buf: pop and discard */
			readw(SSP_DR(pl022->virtbase));
			break;
		case READING_U8:
			*(u8 *) (pl022->rx) =
				readw(SSP_DR(pl022->virtbase)) & 0xFFU;
			break;
		case READING_U16:
			*(u16 *) (pl022->rx) =
				(u16) readw(SSP_DR(pl022->virtbase));
			break;
		case READING_U32:
			*(u32 *) (pl022->rx) =
				readl(SSP_DR(pl022->virtbase));
			break;
		}
		pl022->rx += (pl022->cur_chip->n_bytes);
		pl022->exp_fifo_level--;
	}
	/*
	 * Write as much as possible up to the FIFO depth
	 */
	while ((pl022->exp_fifo_level < pl022->vendor->fifodepth)
	       && (pl022->tx < pl022->tx_end)) {
		switch (pl022->write) {
		case WRITING_NULL:
			/* No tx_buf: clock out zero padding */
			writew(0x0, SSP_DR(pl022->virtbase));
			break;
		case WRITING_U8:
			writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
			break;
		case WRITING_U16:
			writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
			break;
		case WRITING_U32:
			writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
			break;
		}
		pl022->tx += (pl022->cur_chip->n_bytes);
		pl022->exp_fifo_level++;
		/*
		 * This inner reader drains anything that appears in the RX
		 * FIFO while we are transmitting, keeping the RX side from
		 * overrunning mid-loop.
		 */
		while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
		       && (pl022->rx < pl022->rx_end)) {
			switch (pl022->read) {
			case READING_NULL:
				readw(SSP_DR(pl022->virtbase));
				break;
			case READING_U8:
				*(u8 *) (pl022->rx) =
					readw(SSP_DR(pl022->virtbase)) & 0xFFU;
				break;
			case READING_U16:
				*(u16 *) (pl022->rx) =
					(u16) readw(SSP_DR(pl022->virtbase));
				break;
			case READING_U32:
				*(u32 *) (pl022->rx) =
					readl(SSP_DR(pl022->virtbase));
				break;
			}
			pl022->rx += (pl022->cur_chip->n_bytes);
			pl022->exp_fifo_level--;
		}
	}
	/*
	 * On exit the TX FIFO should be full (or tx exhausted) and the
	 * RX FIFO drained as far as the buffers allow.
	 */
}
0759
0760
0761
0762
0763
0764
0765
0766
0767
0768
0769 static void *next_transfer(struct pl022 *pl022)
0770 {
0771 struct spi_message *msg = pl022->cur_msg;
0772 struct spi_transfer *trans = pl022->cur_transfer;
0773
0774
0775 if (trans->transfer_list.next != &msg->transfers) {
0776 pl022->cur_transfer =
0777 list_entry(trans->transfer_list.next,
0778 struct spi_transfer, transfer_list);
0779 return STATE_RUNNING;
0780 }
0781 return STATE_DONE;
0782 }
0783
0784
0785
0786
0787
0788 #ifdef CONFIG_DMA_ENGINE
0789 static void unmap_free_dma_scatter(struct pl022 *pl022)
0790 {
0791
0792 dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl,
0793 pl022->sgt_tx.nents, DMA_TO_DEVICE);
0794 dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl,
0795 pl022->sgt_rx.nents, DMA_FROM_DEVICE);
0796 sg_free_table(&pl022->sgt_rx);
0797 sg_free_table(&pl022->sgt_tx);
0798 }
0799
/*
 * dma_callback - completion callback installed on the RX descriptor only
 * (see configure_dma()): tears down the DMA mappings, accounts the
 * finished transfer and kicks the pump_transfers tasklet to continue or
 * finish the message.
 */
static void dma_callback(void *data)
{
	struct pl022 *pl022 = data;
	struct spi_message *msg = pl022->cur_msg;

	BUG_ON(!pl022->sgt_rx.sgl);

#ifdef VERBOSE_DEBUG
	/*
	 * Debug aid: sync the RX scatterlist back to the CPU and hex-dump
	 * both directions' scatterlist contents.
	 */
	{
		struct scatterlist *sg;
		unsigned int i;

		dma_sync_sg_for_cpu(&pl022->adev->dev,
				    pl022->sgt_rx.sgl,
				    pl022->sgt_rx.nents,
				    DMA_FROM_DEVICE);

		for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) {
			dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i);
			print_hex_dump(KERN_ERR, "SPI RX: ",
				       DUMP_PREFIX_OFFSET,
				       16,
				       1,
				       sg_virt(sg),
				       sg_dma_len(sg),
				       1);
		}
		for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) {
			dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i);
			print_hex_dump(KERN_ERR, "SPI TX: ",
				       DUMP_PREFIX_OFFSET,
				       16,
				       1,
				       sg_virt(sg),
				       sg_dma_len(sg),
				       1);
		}
	}
#endif

	unmap_free_dma_scatter(pl022);

	/* Update total bytes transferred */
	msg->actual_length += pl022->cur_transfer->len;
	/* Move to next transfer */
	msg->state = next_transfer(pl022);
	if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
		pl022_cs_control(pl022, SSP_CHIP_DESELECT);
	tasklet_schedule(&pl022->pump_transfers);
}
0856
0857 static void setup_dma_scatter(struct pl022 *pl022,
0858 void *buffer,
0859 unsigned int length,
0860 struct sg_table *sgtab)
0861 {
0862 struct scatterlist *sg;
0863 int bytesleft = length;
0864 void *bufp = buffer;
0865 int mapbytes;
0866 int i;
0867
0868 if (buffer) {
0869 for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
0870
0871
0872
0873
0874
0875
0876 if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
0877 mapbytes = bytesleft;
0878 else
0879 mapbytes = PAGE_SIZE - offset_in_page(bufp);
0880 sg_set_page(sg, virt_to_page(bufp),
0881 mapbytes, offset_in_page(bufp));
0882 bufp += mapbytes;
0883 bytesleft -= mapbytes;
0884 dev_dbg(&pl022->adev->dev,
0885 "set RX/TX target page @ %p, %d bytes, %d left\n",
0886 bufp, mapbytes, bytesleft);
0887 }
0888 } else {
0889
0890 for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
0891 if (bytesleft < PAGE_SIZE)
0892 mapbytes = bytesleft;
0893 else
0894 mapbytes = PAGE_SIZE;
0895 sg_set_page(sg, virt_to_page(pl022->dummypage),
0896 mapbytes, 0);
0897 bytesleft -= mapbytes;
0898 dev_dbg(&pl022->adev->dev,
0899 "set RX/TX to dummy page %d bytes, %d left\n",
0900 mapbytes, bytesleft);
0901
0902 }
0903 }
0904 BUG_ON(bytesleft);
0905 }
0906
0907
0908
0909
0910
/**
 * configure_dma - set up and start DMA for the current transfer
 * @pl022: SSP driver private data structure
 *
 * Configures both dmaengine channels (burst size from the FIFO trigger
 * levels, bus width from the element widths), builds and maps scatter
 * tables for the RX and TX buffers, preps both descriptors, installs the
 * completion callback on RX only, then submits and fires both channels.
 *
 * Returns 0 on success; -ENODEV if the channels are missing, -ENOMEM on
 * any allocation/mapping/prep failure (with everything unwound via the
 * goto ladder at the bottom).
 */
static int configure_dma(struct pl022 *pl022)
{
	struct dma_slave_config rx_conf = {
		.src_addr = SSP_DR(pl022->phybase),
		.direction = DMA_DEV_TO_MEM,
		.device_fc = false,
	};
	struct dma_slave_config tx_conf = {
		.dst_addr = SSP_DR(pl022->phybase),
		.direction = DMA_MEM_TO_DEV,
		.device_fc = false,
	};
	unsigned int pages;
	int ret;
	int rx_sglen, tx_sglen;
	struct dma_chan *rxchan = pl022->dma_rx_channel;
	struct dma_chan *txchan = pl022->dma_tx_channel;
	struct dma_async_tx_descriptor *rxdesc;
	struct dma_async_tx_descriptor *txdesc;

	/* Check that the channels are available */
	if (!rxchan || !txchan)
		return -ENODEV;

	/*
	 * Map the FIFO interrupt trigger level to the DMA burst size so a
	 * burst never exceeds what the watermark guarantees is available.
	 * Unknown levels fall back to half the FIFO depth.
	 */
	switch (pl022->rx_lev_trig) {
	case SSP_RX_1_OR_MORE_ELEM:
		rx_conf.src_maxburst = 1;
		break;
	case SSP_RX_4_OR_MORE_ELEM:
		rx_conf.src_maxburst = 4;
		break;
	case SSP_RX_8_OR_MORE_ELEM:
		rx_conf.src_maxburst = 8;
		break;
	case SSP_RX_16_OR_MORE_ELEM:
		rx_conf.src_maxburst = 16;
		break;
	case SSP_RX_32_OR_MORE_ELEM:
		rx_conf.src_maxburst = 32;
		break;
	default:
		rx_conf.src_maxburst = pl022->vendor->fifodepth >> 1;
		break;
	}

	switch (pl022->tx_lev_trig) {
	case SSP_TX_1_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 1;
		break;
	case SSP_TX_4_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 4;
		break;
	case SSP_TX_8_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 8;
		break;
	case SSP_TX_16_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 16;
		break;
	case SSP_TX_32_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 32;
		break;
	default:
		tx_conf.dst_maxburst = pl022->vendor->fifodepth >> 1;
		break;
	}

	switch (pl022->read) {
	case READING_NULL:
		/* Use the same as for writing */
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
		break;
	case READING_U8:
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case READING_U16:
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case READING_U32:
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	switch (pl022->write) {
	case WRITING_NULL:
		/* Use the same as for reading */
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
		break;
	case WRITING_U8:
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case WRITING_U16:
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case WRITING_U32:
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	/* Resolve a NULL direction to the other side's width; both must match */
	if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		rx_conf.src_addr_width = tx_conf.dst_addr_width;
	if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		tx_conf.dst_addr_width = rx_conf.src_addr_width;
	BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);

	dmaengine_slave_config(rxchan, &rx_conf);
	dmaengine_slave_config(txchan, &tx_conf);

	/* Create sglists for the transfers */
	pages = DIV_ROUND_UP(pl022->cur_transfer->len, PAGE_SIZE);
	dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages);

	ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_ATOMIC);
	if (ret)
		goto err_alloc_rx_sg;

	ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_ATOMIC);
	if (ret)
		goto err_alloc_tx_sg;

	/* Fill in the scatterlists for the RX+TX buffers */
	setup_dma_scatter(pl022, pl022->rx,
			  pl022->cur_transfer->len, &pl022->sgt_rx);
	setup_dma_scatter(pl022, pl022->tx,
			  pl022->cur_transfer->len, &pl022->sgt_tx);

	/* Map DMA buffers */
	rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
			   pl022->sgt_rx.nents, DMA_FROM_DEVICE);
	if (!rx_sglen)
		goto err_rx_sgmap;

	tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl,
			   pl022->sgt_tx.nents, DMA_TO_DEVICE);
	if (!tx_sglen)
		goto err_tx_sgmap;

	/* Send both scatterlists */
	rxdesc = dmaengine_prep_slave_sg(rxchan,
				      pl022->sgt_rx.sgl,
				      rx_sglen,
				      DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		goto err_rxdesc;

	txdesc = dmaengine_prep_slave_sg(txchan,
				      pl022->sgt_tx.sgl,
				      tx_sglen,
				      DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		goto err_txdesc;

	/* Completion callback on the RX side only — it finishes last */
	rxdesc->callback = dma_callback;
	rxdesc->callback_param = pl022;

	/* Submit and fire RX first so we are ready to receive */
	dmaengine_submit(rxdesc);
	dmaengine_submit(txdesc);
	dma_async_issue_pending(rxchan);
	dma_async_issue_pending(txchan);
	pl022->dma_running = true;

	return 0;

err_txdesc:
	dmaengine_terminate_all(txchan);
err_rxdesc:
	dmaengine_terminate_all(rxchan);
	dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
		     pl022->sgt_tx.nents, DMA_TO_DEVICE);
err_tx_sgmap:
	dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
		     pl022->sgt_rx.nents, DMA_FROM_DEVICE);
err_rx_sgmap:
	sg_free_table(&pl022->sgt_tx);
err_alloc_tx_sg:
	sg_free_table(&pl022->sgt_rx);
err_alloc_rx_sg:
	return -ENOMEM;
}
1100
/*
 * pl022_dma_probe - acquire DMA channels using the platform-data filter
 * function/params, plus a dummy page for bufferless directions. On any
 * failure everything already acquired is released and -ENODEV returned;
 * the driver then falls back to non-DMA operation.
 */
static int pl022_dma_probe(struct pl022 *pl022)
{
	dma_cap_mask_t mask;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * We need both RX and TX channels to do DMA, else do none
	 * of them.
	 */
	pl022->dma_rx_channel = dma_request_channel(mask,
					    pl022->master_info->dma_filter,
					    pl022->master_info->dma_rx_param);
	if (!pl022->dma_rx_channel) {
		dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n");
		goto err_no_rxchan;
	}

	pl022->dma_tx_channel = dma_request_channel(mask,
					    pl022->master_info->dma_filter,
					    pl022->master_info->dma_tx_param);
	if (!pl022->dma_tx_channel) {
		dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n");
		goto err_no_txchan;
	}

	pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!pl022->dummypage)
		goto err_no_dummypage;

	dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n",
		 dma_chan_name(pl022->dma_rx_channel),
		 dma_chan_name(pl022->dma_tx_channel));

	return 0;

err_no_dummypage:
	dma_release_channel(pl022->dma_tx_channel);
err_no_txchan:
	dma_release_channel(pl022->dma_rx_channel);
	pl022->dma_rx_channel = NULL;
err_no_rxchan:
	dev_err(&pl022->adev->dev,
		"Failed to work in dma mode, work without dma!\n");
	return -ENODEV;
}
1148
/*
 * pl022_dma_autoprobe - acquire named "rx"/"tx" DMA channels (e.g. from
 * the device tree) plus the dummy page. Unwinds on failure and returns
 * the error from dma_request_chan() or -ENOMEM.
 */
static int pl022_dma_autoprobe(struct pl022 *pl022)
{
	struct device *dev = &pl022->adev->dev;
	struct dma_chan *chan;
	int err;

	/* automatically configure DMA channels from platform, normally using DT */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan)) {
		err = PTR_ERR(chan);
		goto err_no_rxchan;
	}

	pl022->dma_rx_channel = chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan)) {
		err = PTR_ERR(chan);
		goto err_no_txchan;
	}

	pl022->dma_tx_channel = chan;

	pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!pl022->dummypage) {
		err = -ENOMEM;
		goto err_no_dummypage;
	}

	return 0;

err_no_dummypage:
	dma_release_channel(pl022->dma_tx_channel);
	pl022->dma_tx_channel = NULL;
err_no_txchan:
	dma_release_channel(pl022->dma_rx_channel);
	pl022->dma_rx_channel = NULL;
err_no_rxchan:
	return err;
}
1189
1190 static void terminate_dma(struct pl022 *pl022)
1191 {
1192 struct dma_chan *rxchan = pl022->dma_rx_channel;
1193 struct dma_chan *txchan = pl022->dma_tx_channel;
1194
1195 dmaengine_terminate_all(rxchan);
1196 dmaengine_terminate_all(txchan);
1197 unmap_free_dma_scatter(pl022);
1198 pl022->dma_running = false;
1199 }
1200
/*
 * pl022_dma_remove - release all DMA resources: stop a running transfer
 * first, release only the channels that were actually acquired, then free
 * the dummy page (kfree(NULL) is a no-op).
 */
static void pl022_dma_remove(struct pl022 *pl022)
{
	if (pl022->dma_running)
		terminate_dma(pl022);
	if (pl022->dma_tx_channel)
		dma_release_channel(pl022->dma_tx_channel);
	if (pl022->dma_rx_channel)
		dma_release_channel(pl022->dma_rx_channel);
	kfree(pl022->dummypage);
}
1211
1212 #else
/* Stub: no DMA engine support compiled in — force interrupt/poll mode */
static inline int configure_dma(struct pl022 *pl022)
{
	return -ENODEV;
}
1217
/* Stub: nothing to probe without dmaengine; report success */
static inline int pl022_dma_autoprobe(struct pl022 *pl022)
{
	return 0;
}
1222
/* Stub: nothing to probe without dmaengine; report success */
static inline int pl022_dma_probe(struct pl022 *pl022)
{
	return 0;
}
1227
/* Stub: no DMA resources to release without dmaengine */
static inline void pl022_dma_remove(struct pl022 *pl022)
{
}
1231 #endif
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
/**
 * pl022_interrupt_handler - IRQ handler for interrupt-driven transfers
 * @irq: IRQ number (unused)
 * @dev_id: the struct pl022 this interrupt belongs to
 *
 * On RX overrun the transfer is aborted: interrupts are masked and
 * cleared, the SSP block disabled, the message marked STATE_ERROR and the
 * pump rescheduled. Otherwise readwriter() moves data; once TX is
 * exhausted the TX interrupt is masked (RX kept), and once RX is complete
 * the transfer is accounted and the pump scheduled for the next one.
 */
static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
{
	struct pl022 *pl022 = dev_id;
	struct spi_message *msg = pl022->cur_msg;
	u16 irq_status = 0;

	if (unlikely(!msg)) {
		dev_err(&pl022->adev->dev,
			"bad message state in interrupt handler");
		/* Never fail */
		return IRQ_HANDLED;
	}

	/* Read the Masked Interrupt Status Register */
	irq_status = readw(SSP_MIS(pl022->virtbase));

	if (unlikely(!irq_status))
		return IRQ_NONE;

	/*
	 * This handles the FIFO interrupts, the timeout
	 * interrupts are flatly ignored, they cannot be
	 * trusted.
	 */
	if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
		/*
		 * Overrun interrupt - bail out since our Data has been
		 * corrupted
		 */
		dev_err(&pl022->adev->dev, "FIFO overrun\n");
		if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
			dev_err(&pl022->adev->dev,
				"RXFIFO is full\n");

		/*
		 * Disable and clear interrupts, disable SSP,
		 * mark message with bad status so it can be
		 * retried.
		 */
		writew(DISABLE_ALL_INTERRUPTS,
		       SSP_IMSC(pl022->virtbase));
		writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
		writew((readw(SSP_CR1(pl022->virtbase)) &
			(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
		msg->state = STATE_ERROR;

		/* Schedule message queue handler */
		tasklet_schedule(&pl022->pump_transfers);
		return IRQ_HANDLED;
	}

	readwriter(pl022);

	if (pl022->tx == pl022->tx_end) {
		/* Tell RX to receive data, disable the TX interrupt */
		writew((readw(SSP_IMSC(pl022->virtbase)) &
			~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM,
		       SSP_IMSC(pl022->virtbase));
	}

	/*
	 * Since all transactions must write as much as shall be read,
	 * we can conclude the entire transaction once RX is complete.
	 * At this point, all TX will always be finished.
	 */
	if (pl022->rx >= pl022->rx_end) {
		writew(DISABLE_ALL_INTERRUPTS,
		       SSP_IMSC(pl022->virtbase));
		writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
		if (unlikely(pl022->rx > pl022->rx_end)) {
			dev_warn(&pl022->adev->dev, "read %u surplus "
				 "bytes (did you request an odd "
				 "number of bytes on a 16bit bus?)\n",
				 (u32) (pl022->rx - pl022->rx_end));
		}
		/* Update total bytes transferred */
		msg->actual_length += pl022->cur_transfer->len;
		/* Move to next transfer */
		msg->state = next_transfer(pl022);
		if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
			pl022_cs_control(pl022, SSP_CHIP_DESELECT);
		tasklet_schedule(&pl022->pump_transfers);
		return IRQ_HANDLED;
	}

	return IRQ_HANDLED;
}
1333
1334
1335
1336
1337
1338 static int set_up_next_transfer(struct pl022 *pl022,
1339 struct spi_transfer *transfer)
1340 {
1341 int residue;
1342
1343
1344 residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
1345 if (unlikely(residue != 0)) {
1346 dev_err(&pl022->adev->dev,
1347 "message of %u bytes to transmit but the current "
1348 "chip bus has a data width of %u bytes!\n",
1349 pl022->cur_transfer->len,
1350 pl022->cur_chip->n_bytes);
1351 dev_err(&pl022->adev->dev, "skipping this message\n");
1352 return -EIO;
1353 }
1354 pl022->tx = (void *)transfer->tx_buf;
1355 pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
1356 pl022->rx = (void *)transfer->rx_buf;
1357 pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
1358 pl022->write =
1359 pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
1360 pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
1361 return 0;
1362 }
1363
1364
1365
1366
1367
1368
1369
/**
 * pump_transfers - tasklet driving the message state machine
 * @data: the struct pl022 (cast from the tasklet's unsigned long)
 *
 * Finishes the message on STATE_ERROR/STATE_DONE, otherwise executes the
 * previous transfer's delay and optional CS reassert, sets up the next
 * transfer, flushes the FIFOs and starts it — via DMA when enabled for
 * this chip (falling back to interrupt mode if DMA setup fails).
 */
static void pump_transfers(unsigned long data)
{
	struct pl022 *pl022 = (struct pl022 *) data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;

	/* Get current state information */
	message = pl022->cur_msg;
	transfer = pl022->cur_transfer;

	/* Handle for abort */
	if (message->state == STATE_ERROR) {
		message->status = -EIO;
		giveback(pl022);
		return;
	}

	/* Handle end of message */
	if (message->state == STATE_DONE) {
		message->status = 0;
		giveback(pl022);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == STATE_RUNNING) {
		previous = list_entry(transfer->transfer_list.prev,
				      struct spi_transfer,
				      transfer_list);
		/* Execute any delay the previous transfer requested */
		spi_transfer_delay_exec(previous);

		/* Reselect chip select only if cs_change was requested */
		if (previous->cs_change)
			pl022_cs_control(pl022, SSP_CHIP_SELECT);
	} else {
		/* STATE_START */
		message->state = STATE_RUNNING;
	}

	if (set_up_next_transfer(pl022, transfer)) {
		message->state = STATE_ERROR;
		message->status = -EIO;
		giveback(pl022);
		return;
	}
	/* Flush the FIFOs and let's go! */
	flush(pl022);

	if (pl022->cur_chip->enable_dma) {
		if (configure_dma(pl022)) {
			dev_dbg(&pl022->adev->dev,
				"configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
		return;
	}

err_config_dma:
	/* Enable all interrupts except RX; the TX path feeds readwriter() */
	writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase));
}
1436
/*
 * do_interrupt_dma_transfer - start the current transfer in interrupt or
 * DMA mode: select the chip if needed, program the transfer, then enable
 * the SSP and unmask the appropriate interrupts.
 */
static void do_interrupt_dma_transfer(struct pl022 *pl022)
{
	/*
	 * Default is to enable all interrupts except RX -
	 * this will be enabled once TX is complete
	 */
	u32 irqflags = (u32)(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM);

	/* Enable target chip, if not already active */
	if (!pl022->next_msg_cs_active)
		pl022_cs_control(pl022, SSP_CHIP_SELECT);

	if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
		/* Error path */
		pl022->cur_msg->state = STATE_ERROR;
		pl022->cur_msg->status = -EIO;
		giveback(pl022);
		return;
	}
	/* If we're using DMA, set up DMA here */
	if (pl022->cur_chip->enable_dma) {
		/* Configure DMA transfer */
		if (configure_dma(pl022)) {
			dev_dbg(&pl022->adev->dev,
				"configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
		/* Disable interrupts in DMA mode, IRQ from DMA controller */
		irqflags = DISABLE_ALL_INTERRUPTS;
	}
err_config_dma:
	/* Enable SSP, turn on interrupts */
	writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
	       SSP_CR1(pl022->virtbase));
	writew(irqflags, SSP_IMSC(pl022->virtbase));
}
1473
1474 static void print_current_status(struct pl022 *pl022)
1475 {
1476 u32 read_cr0;
1477 u16 read_cr1, read_dmacr, read_sr;
1478
1479 if (pl022->vendor->extended_cr)
1480 read_cr0 = readl(SSP_CR0(pl022->virtbase));
1481 else
1482 read_cr0 = readw(SSP_CR0(pl022->virtbase));
1483 read_cr1 = readw(SSP_CR1(pl022->virtbase));
1484 read_dmacr = readw(SSP_DMACR(pl022->virtbase));
1485 read_sr = readw(SSP_SR(pl022->virtbase));
1486
1487 dev_warn(&pl022->adev->dev, "spi-pl022 CR0: %x\n", read_cr0);
1488 dev_warn(&pl022->adev->dev, "spi-pl022 CR1: %x\n", read_cr1);
1489 dev_warn(&pl022->adev->dev, "spi-pl022 DMACR: %x\n", read_dmacr);
1490 dev_warn(&pl022->adev->dev, "spi-pl022 SR: %x\n", read_sr);
1491 dev_warn(&pl022->adev->dev,
1492 "spi-pl022 exp_fifo_level/fifodepth: %u/%d\n",
1493 pl022->exp_fifo_level,
1494 pl022->vendor->fifodepth);
1495
1496 }
1497
/*
 * do_polling_transfer - run every transfer of the current message by
 * busy-waiting on the FIFOs, with a SPI_POLLING_TIMEOUT ms deadline per
 * transfer. Sets message->status and gives the message back when done,
 * on error, or on timeout (-EAGAIN).
 */
static void do_polling_transfer(struct pl022 *pl022)
{
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	unsigned long time, timeout;

	message = pl022->cur_msg;

	while (message->state != STATE_DONE) {
		/* Handle for abort */
		if (message->state == STATE_ERROR)
			break;
		transfer = pl022->cur_transfer;

		/* Delay if requested at end of transfer */
		if (message->state == STATE_RUNNING) {
			previous =
			    list_entry(transfer->transfer_list.prev,
				       struct spi_transfer, transfer_list);
			spi_transfer_delay_exec(previous);
			if (previous->cs_change)
				pl022_cs_control(pl022, SSP_CHIP_SELECT);
		} else {
			/* STATE_START: assert CS unless it is still active */
			message->state = STATE_RUNNING;
			if (!pl022->next_msg_cs_active)
				pl022_cs_control(pl022, SSP_CHIP_SELECT);
		}

		/* Configuration Changing Per Transfer */
		if (set_up_next_transfer(pl022, transfer)) {
			/* Error path */
			message->state = STATE_ERROR;
			break;
		}
		/* Flush FIFOs and enable SSP */
		flush(pl022);
		writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
		       SSP_CR1(pl022->virtbase));

		dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");

		timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
		while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
			time = jiffies;
			readwriter(pl022);
			if (time_after(time, timeout)) {
				dev_warn(&pl022->adev->dev,
					 "%s: timeout!\n", __func__);
				message->state = STATE_TIMEOUT;
				print_current_status(pl022);
				goto out;
			}
			cpu_relax();
		}

		/* Update total byte transferred */
		message->actual_length += pl022->cur_transfer->len;
		/* Move to next transfer */
		message->state = next_transfer(pl022);
		if (message->state != STATE_DONE
		    && pl022->cur_transfer->cs_change)
			pl022_cs_control(pl022, SSP_CHIP_DESELECT);
	}
out:
	/* Handle end of message */
	if (message->state == STATE_DONE)
		message->status = 0;
	else if (message->state == STATE_TIMEOUT)
		message->status = -EAGAIN;
	else
		message->status = -EIO;

	giveback(pl022);
	return;
}
1575
1576 static int pl022_transfer_one_message(struct spi_master *master,
1577 struct spi_message *msg)
1578 {
1579 struct pl022 *pl022 = spi_master_get_devdata(master);
1580
1581
1582 pl022->cur_msg = msg;
1583 msg->state = STATE_START;
1584
1585 pl022->cur_transfer = list_entry(msg->transfers.next,
1586 struct spi_transfer, transfer_list);
1587
1588
1589 pl022->cur_chip = spi_get_ctldata(msg->spi);
1590 pl022->cur_cs = msg->spi->chip_select;
1591
1592 pl022->cur_gpiod = msg->spi->cs_gpiod;
1593
1594 restore_state(pl022);
1595 flush(pl022);
1596
1597 if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
1598 do_polling_transfer(pl022);
1599 else
1600 do_interrupt_dma_transfer(pl022);
1601
1602 return 0;
1603 }
1604
1605 static int pl022_unprepare_transfer_hardware(struct spi_master *master)
1606 {
1607 struct pl022 *pl022 = spi_master_get_devdata(master);
1608
1609
1610 writew((readw(SSP_CR1(pl022->virtbase)) &
1611 (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
1612
1613 return 0;
1614 }
1615
1616 static int verify_controller_parameters(struct pl022 *pl022,
1617 struct pl022_config_chip const *chip_info)
1618 {
1619 if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
1620 || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
1621 dev_err(&pl022->adev->dev,
1622 "interface is configured incorrectly\n");
1623 return -EINVAL;
1624 }
1625 if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
1626 (!pl022->vendor->unidir)) {
1627 dev_err(&pl022->adev->dev,
1628 "unidirectional mode not supported in this "
1629 "hardware version\n");
1630 return -EINVAL;
1631 }
1632 if ((chip_info->hierarchy != SSP_MASTER)
1633 && (chip_info->hierarchy != SSP_SLAVE)) {
1634 dev_err(&pl022->adev->dev,
1635 "hierarchy is configured incorrectly\n");
1636 return -EINVAL;
1637 }
1638 if ((chip_info->com_mode != INTERRUPT_TRANSFER)
1639 && (chip_info->com_mode != DMA_TRANSFER)
1640 && (chip_info->com_mode != POLLING_TRANSFER)) {
1641 dev_err(&pl022->adev->dev,
1642 "Communication mode is configured incorrectly\n");
1643 return -EINVAL;
1644 }
1645 switch (chip_info->rx_lev_trig) {
1646 case SSP_RX_1_OR_MORE_ELEM:
1647 case SSP_RX_4_OR_MORE_ELEM:
1648 case SSP_RX_8_OR_MORE_ELEM:
1649
1650 break;
1651 case SSP_RX_16_OR_MORE_ELEM:
1652 if (pl022->vendor->fifodepth < 16) {
1653 dev_err(&pl022->adev->dev,
1654 "RX FIFO Trigger Level is configured incorrectly\n");
1655 return -EINVAL;
1656 }
1657 break;
1658 case SSP_RX_32_OR_MORE_ELEM:
1659 if (pl022->vendor->fifodepth < 32) {
1660 dev_err(&pl022->adev->dev,
1661 "RX FIFO Trigger Level is configured incorrectly\n");
1662 return -EINVAL;
1663 }
1664 break;
1665 default:
1666 dev_err(&pl022->adev->dev,
1667 "RX FIFO Trigger Level is configured incorrectly\n");
1668 return -EINVAL;
1669 }
1670 switch (chip_info->tx_lev_trig) {
1671 case SSP_TX_1_OR_MORE_EMPTY_LOC:
1672 case SSP_TX_4_OR_MORE_EMPTY_LOC:
1673 case SSP_TX_8_OR_MORE_EMPTY_LOC:
1674
1675 break;
1676 case SSP_TX_16_OR_MORE_EMPTY_LOC:
1677 if (pl022->vendor->fifodepth < 16) {
1678 dev_err(&pl022->adev->dev,
1679 "TX FIFO Trigger Level is configured incorrectly\n");
1680 return -EINVAL;
1681 }
1682 break;
1683 case SSP_TX_32_OR_MORE_EMPTY_LOC:
1684 if (pl022->vendor->fifodepth < 32) {
1685 dev_err(&pl022->adev->dev,
1686 "TX FIFO Trigger Level is configured incorrectly\n");
1687 return -EINVAL;
1688 }
1689 break;
1690 default:
1691 dev_err(&pl022->adev->dev,
1692 "TX FIFO Trigger Level is configured incorrectly\n");
1693 return -EINVAL;
1694 }
1695 if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
1696 if ((chip_info->ctrl_len < SSP_BITS_4)
1697 || (chip_info->ctrl_len > SSP_BITS_32)) {
1698 dev_err(&pl022->adev->dev,
1699 "CTRL LEN is configured incorrectly\n");
1700 return -EINVAL;
1701 }
1702 if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
1703 && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
1704 dev_err(&pl022->adev->dev,
1705 "Wait State is configured incorrectly\n");
1706 return -EINVAL;
1707 }
1708
1709 if (pl022->vendor->extended_cr) {
1710 if ((chip_info->duplex !=
1711 SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
1712 && (chip_info->duplex !=
1713 SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
1714 dev_err(&pl022->adev->dev,
1715 "Microwire duplex mode is configured incorrectly\n");
1716 return -EINVAL;
1717 }
1718 } else {
1719 if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
1720 dev_err(&pl022->adev->dev,
1721 "Microwire half duplex mode requested,"
1722 " but this is only available in the"
1723 " ST version of PL022\n");
1724 return -EINVAL;
1725 }
1726 }
1727 }
1728 return 0;
1729 }
1730
1731 static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
1732 {
1733 return rate / (cpsdvsr * (1 + scr));
1734 }
1735
/**
 * calculate_effective_freq - find clock divisors for a requested bus rate
 * @pl022: driver state; supplies the SSP input clock and logging device
 * @freq: desired SPI bus frequency in Hz
 * @clk_freq: output; receives the best cpsdvsr/scr pair found
 *
 * The effective rate is rate / (cpsdvsr * (1 + scr)), with cpsdvsr an even
 * value in [CPSDVR_MIN, CPSDVR_MAX] and scr in [SCR_MIN, SCR_MAX]. The
 * search keeps the highest rate not exceeding @freq.
 *
 * Returns 0 on success, -EINVAL if @freq is below the minimum reachable
 * rate. Requests above the maximum are clamped (with a warning).
 */
static int calculate_effective_freq(struct pl022 *pl022, int freq, struct
				    ssp_clock_params * clk_freq)
{
	/* Lets calculate the frequency parameters */
	u16 cpsdvsr = CPSDVR_MIN, scr = SCR_MIN;
	u32 rate, max_tclk, min_tclk, best_freq = 0, best_cpsdvsr = 0,
		best_scr = 0, tmp, found = 0;

	rate = clk_get_rate(pl022->clk);
	/* cpsdvsr = 2 & scr = 0: fastest possible rate */
	max_tclk = spi_rate(rate, CPSDVR_MIN, SCR_MIN);
	/* cpsdvsr = 254 & scr = 255: slowest possible rate */
	min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX);

	if (freq > max_tclk)
		dev_warn(&pl022->adev->dev,
			"Max speed that can be programmed is %d Hz, you requested %d\n",
			max_tclk, freq);

	if (freq < min_tclk) {
		dev_err(&pl022->adev->dev,
			"Requested frequency: %d Hz is less than minimum possible %d Hz\n",
			freq, min_tclk);
		return -EINVAL;
	}

	/*
	 * best_freq will give closest possible available rate (<= requested
	 * freq) for all values of scr & cpsdvsr.
	 */
	while ((cpsdvsr <= CPSDVR_MAX) && !found) {
		while (scr <= SCR_MAX) {
			tmp = spi_rate(rate, cpsdvsr, scr);

			if (tmp > freq) {
				/* we need lower freq */
				scr++;
				continue;
			}

			/*
			 * If found exact value, mark found and break.
			 * If found more closer value, update and break.
			 */
			if (tmp > best_freq) {
				best_freq = tmp;
				best_cpsdvsr = cpsdvsr;
				best_scr = scr;

				if (tmp == freq)
					found = 1;
			}
			/*
			 * increased scr will give lower rates, which are not
			 * required
			 */
			break;
		}
		cpsdvsr += 2;
		scr = SCR_MIN;
	}

	WARN(!best_freq, "pl022: Matching cpsdvsr and scr not found for %d Hz rate \n",
		freq);

	clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF);
	clk_freq->scr = (u8) (best_scr & 0xFF);
	dev_dbg(&pl022->adev->dev,
		"SSP Target Frequency is: %u, Effective Frequency is %u\n",
		freq, best_freq);
	dev_dbg(&pl022->adev->dev, "SSP cpsdvsr = %d, scr = %d\n",
		clk_freq->cpsdvsr, clk_freq->scr);

	return 0;
}
1811
1812
1813
1814
1815
/*
 * A piece of default chip info unless the platform
 * supplies it.
 */
static const struct pl022_config_chip pl022_default_chip_info = {
	.com_mode = INTERRUPT_TRANSFER,
	.iface = SSP_INTERFACE_MOTOROLA_SPI,
	.hierarchy = SSP_MASTER,
	.slave_tx_disable = DO_NOT_DRIVE_TX,
	.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
	.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
	.ctrl_len = SSP_BITS_8,
	.wait_state = SSP_MWIRE_WAIT_ZERO,
	.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
};
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
/**
 * pl022_setup - setup function registered to SPI master framework
 * @spi: spi device which is requesting setup
 *
 * This function is registered to the SPI framework for this SPI master
 * controller. If it is the first time when setup is called by this device,
 * this function will initialize the runtime state for this chip and save
 * the same in the device structure. Else it will update the runtime info
 * with the updated chip info. Also setup is called before the actual
 * transfer is being made, so it should contain the updated information.
 */
static int pl022_setup(struct spi_device *spi)
{
	struct pl022_config_chip const *chip_info;
	struct pl022_config_chip chip_info_dt;
	struct chip_data *chip;
	struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0};
	int status = 0;
	struct pl022 *pl022 = spi_master_get_devdata(spi->master);
	unsigned int bits = spi->bits_per_word;
	u32 tmp;
	struct device_node *np = spi->dev.of_node;

	if (!spi->max_speed_hz)
		return -EINVAL;

	/* Get controller_state if one is supplied */
	chip = spi_get_ctldata(spi);

	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		dev_dbg(&spi->dev,
			"allocated memory for controller's runtime state\n");
	}

	/* Get controller data if one is supplied */
	chip_info = spi->controller_data;

	if (chip_info == NULL) {
		if (np) {
			/* Build chip config from device tree properties */
			chip_info_dt = pl022_default_chip_info;

			chip_info_dt.hierarchy = SSP_MASTER;
			of_property_read_u32(np, "pl022,interface",
				&chip_info_dt.iface);
			of_property_read_u32(np, "pl022,com-mode",
				&chip_info_dt.com_mode);
			of_property_read_u32(np, "pl022,rx-level-trig",
				&chip_info_dt.rx_lev_trig);
			of_property_read_u32(np, "pl022,tx-level-trig",
				&chip_info_dt.tx_lev_trig);
			of_property_read_u32(np, "pl022,ctrl-len",
				&chip_info_dt.ctrl_len);
			of_property_read_u32(np, "pl022,wait-state",
				&chip_info_dt.wait_state);
			of_property_read_u32(np, "pl022,duplex",
				&chip_info_dt.duplex);

			chip_info = &chip_info_dt;
		} else {
			chip_info = &pl022_default_chip_info;
			/* spi_board_info.controller_data not is supplied */
			dev_dbg(&spi->dev,
				"using default controller_data settings\n");
		}
	} else
		dev_dbg(&spi->dev,
			"using user supplied controller_data settings\n");

	/*
	 * We can override with custom divisors, else we use the board
	 * frequency setting
	 */
	if ((0 == chip_info->clk_freq.cpsdvsr)
	    && (0 == chip_info->clk_freq.scr)) {
		status = calculate_effective_freq(pl022,
						  spi->max_speed_hz,
						  &clk_freq);
		if (status < 0)
			goto err_config_params;
	} else {
		memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq));
		/* cpsdvsr must be even; round an odd value down */
		if ((clk_freq.cpsdvsr % 2) != 0)
			clk_freq.cpsdvsr =
				clk_freq.cpsdvsr - 1;
	}
	if ((clk_freq.cpsdvsr < CPSDVR_MIN)
	    || (clk_freq.cpsdvsr > CPSDVR_MAX)) {
		status = -EINVAL;
		dev_err(&spi->dev,
			"cpsdvsr is configured incorrectly\n");
		goto err_config_params;
	}

	status = verify_controller_parameters(pl022, chip_info);
	if (status) {
		dev_err(&spi->dev, "controller data is incorrect");
		goto err_config_params;
	}

	pl022->rx_lev_trig = chip_info->rx_lev_trig;
	pl022->tx_lev_trig = chip_info->tx_lev_trig;

	/* Now set controller state based on controller data */
	chip->xfer_type = chip_info->com_mode;

	/* Check bits per word with vendor specific range */
	if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) {
		status = -ENOTSUPP;
		dev_err(&spi->dev, "illegal data size for this controller!\n");
		dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
				pl022->vendor->max_bpw);
		goto err_config_params;
	} else if (bits <= 8) {
		dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n");
		chip->n_bytes = 1;
		chip->read = READING_U8;
		chip->write = WRITING_U8;
	} else if (bits <= 16) {
		dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
		chip->n_bytes = 2;
		chip->read = READING_U16;
		chip->write = WRITING_U16;
	} else {
		dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
		chip->n_bytes = 4;
		chip->read = READING_U32;
		chip->write = WRITING_U32;
	}

	/* Now Initialize all register settings required for this chip */
	chip->cr0 = 0;
	chip->cr1 = 0;
	chip->dmacr = 0;
	chip->cpsr = 0;
	if ((chip_info->com_mode == DMA_TRANSFER)
	    && ((pl022->master_info)->enable_dma)) {
		chip->enable_dma = true;
		dev_dbg(&spi->dev, "DMA mode set in controller state\n");
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	} else {
		chip->enable_dma = false;
		dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	}

	chip->cpsr = clk_freq.cpsdvsr;

	/* Special setup for the ST micro extended control registers */
	if (pl022->vendor->extended_cr) {
		u32 etx;

		if (pl022->vendor->pl023) {
			/* These bits are only in the PL023 */
			SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
				       SSP_CR1_MASK_FBCLKDEL_ST, 13);
		} else {
			/* These bits are in the PL022 but not PL023 */
			SSP_WRITE_BITS(chip->cr0, chip_info->duplex,
				       SSP_CR0_MASK_HALFDUP_ST, 5);
			SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len,
				       SSP_CR0_MASK_CSS_ST, 16);
			SSP_WRITE_BITS(chip->cr0, chip_info->iface,
				       SSP_CR0_MASK_FRF_ST, 21);
			SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
				       SSP_CR1_MASK_MWAIT_ST, 6);
		}
		SSP_WRITE_BITS(chip->cr0, bits - 1,
			       SSP_CR0_MASK_DSS_ST, 0);

		if (spi->mode & SPI_LSB_FIRST) {
			tmp = SSP_RX_LSB;
			etx = SSP_TX_LSB;
		} else {
			tmp = SSP_RX_MSB;
			etx = SSP_TX_MSB;
		}
		SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4);
		SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5);
		SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
			       SSP_CR1_MASK_RXIFLSEL_ST, 7);
		SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
			       SSP_CR1_MASK_TXIFLSEL_ST, 10);
	} else {
		SSP_WRITE_BITS(chip->cr0, bits - 1,
			       SSP_CR0_MASK_DSS, 0);
		SSP_WRITE_BITS(chip->cr0, chip_info->iface,
			       SSP_CR0_MASK_FRF, 4);
	}

	/* Stuff that is common for all versions */
	if (spi->mode & SPI_CPOL)
		tmp = SSP_CLK_POL_IDLE_HIGH;
	else
		tmp = SSP_CLK_POL_IDLE_LOW;
	SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6);

	if (spi->mode & SPI_CPHA)
		tmp = SSP_CLK_SECOND_EDGE;
	else
		tmp = SSP_CLK_FIRST_EDGE;
	SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7);

	SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8);
	/* Loopback is available on all versions except PL023 */
	if (pl022->vendor->loopback) {
		if (spi->mode & SPI_LOOP)
			tmp = LOOPBACK_ENABLED;
		else
			tmp = LOOPBACK_DISABLED;
		SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0);
	}
	SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
	SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
	SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD,
		3);

	/* Save controller_state */
	spi_set_ctldata(spi, chip);
	return status;
 err_config_params:
	spi_set_ctldata(spi, NULL);
	kfree(chip);
	return status;
}
2062
2063
2064
2065
2066
2067
2068
2069
2070 static void pl022_cleanup(struct spi_device *spi)
2071 {
2072 struct chip_data *chip = spi_get_ctldata(spi);
2073
2074 spi_set_ctldata(spi, NULL);
2075 kfree(chip);
2076 }
2077
2078 static struct pl022_ssp_controller *
2079 pl022_platform_data_dt_get(struct device *dev)
2080 {
2081 struct device_node *np = dev->of_node;
2082 struct pl022_ssp_controller *pd;
2083
2084 if (!np) {
2085 dev_err(dev, "no dt node defined\n");
2086 return NULL;
2087 }
2088
2089 pd = devm_kzalloc(dev, sizeof(struct pl022_ssp_controller), GFP_KERNEL);
2090 if (!pd)
2091 return NULL;
2092
2093 pd->bus_id = -1;
2094 pd->enable_dma = 1;
2095 of_property_read_u32(np, "pl022,autosuspend-delay",
2096 &pd->autosuspend_delay);
2097 pd->rt = of_property_read_bool(np, "pl022,rt");
2098
2099 return pd;
2100 }
2101
/*
 * pl022_probe - AMBA bus probe: allocate and register an SPI master for
 * this PL022 instance, map its registers, claim clock/IRQ/DMA resources.
 * Resources are unwound through the goto labels on any failure.
 */
static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct device *dev = &adev->dev;
	struct pl022_ssp_controller *platform_info =
		dev_get_platdata(&adev->dev);
	struct spi_master *master;
	struct pl022 *pl022 = NULL;	/* Data for this driver */
	int status = 0;

	dev_info(&adev->dev,
		 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
	if (!platform_info && IS_ENABLED(CONFIG_OF))
		platform_info = pl022_platform_data_dt_get(dev);

	if (!platform_info) {
		dev_err(dev, "probe: no platform data defined\n");
		return -ENODEV;
	}

	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(struct pl022));
	if (master == NULL) {
		dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
		return -ENOMEM;
	}

	pl022 = spi_master_get_devdata(master);
	pl022->master = master;
	pl022->master_info = platform_info;
	pl022->adev = adev;
	pl022->vendor = id->data;

	/*
	 * Bus Number Which has been Assigned to this SSP controller
	 * on this board
	 */
	master->bus_num = platform_info->bus_id;
	master->cleanup = pl022_cleanup;
	master->setup = pl022_setup;
	master->auto_runtime_pm = true;
	master->transfer_one_message = pl022_transfer_one_message;
	master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
	master->rt = platform_info->rt;
	master->dev.of_node = dev->of_node;
	master->use_gpio_descriptors = true;

	/*
	 * Supports mode 0-3, loopback, and active low CS. Transfers are
	 * always MS bit first on the original pl022; LSB-first only on
	 * the extended (ST) variant.
	 */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	if (pl022->vendor->extended_cr)
		master->mode_bits |= SPI_LSB_FIRST;

	dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);

	status = amba_request_regions(adev, NULL);
	if (status)
		goto err_no_ioregion;

	pl022->phybase = adev->res.start;
	pl022->virtbase = devm_ioremap(dev, adev->res.start,
				       resource_size(&adev->res));
	if (pl022->virtbase == NULL) {
		status = -ENOMEM;
		goto err_no_ioremap;
	}
	dev_info(&adev->dev, "mapped registers from %pa to %p\n",
		&adev->res.start, pl022->virtbase);

	pl022->clk = devm_clk_get(&adev->dev, NULL);
	if (IS_ERR(pl022->clk)) {
		status = PTR_ERR(pl022->clk);
		dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
		goto err_no_clk;
	}

	status = clk_prepare_enable(pl022->clk);
	if (status) {
		dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
		goto err_no_clk_en;
	}

	/* Initialize transfer pump */
	tasklet_init(&pl022->pump_transfers, pump_transfers,
		     (unsigned long)pl022);

	/* Disable SSP */
	writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
	       SSP_CR1(pl022->virtbase));
	load_ssp_default_config(pl022);

	status = devm_request_irq(dev, adev->irq[0], pl022_interrupt_handler,
				  0, "pl022", pl022);
	if (status < 0) {
		dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
		goto err_no_irq;
	}

	/* Get DMA channels, try autoconfiguration first */
	status = pl022_dma_autoprobe(pl022);
	if (status == -EPROBE_DEFER) {
		dev_dbg(dev, "deferring probe to get DMA channel\n");
		goto err_no_irq;
	}

	/* If that failed, use channels from platform_info */
	if (status == 0)
		platform_info->enable_dma = 1;
	else if (platform_info->enable_dma) {
		status = pl022_dma_probe(pl022);
		if (status != 0)
			platform_info->enable_dma = 0;
	}

	/* Register with the SPI framework */
	amba_set_drvdata(adev, pl022);
	status = devm_spi_register_master(&adev->dev, master);
	if (status != 0) {
		dev_err(&adev->dev,
			"probe - problem registering spi master\n");
		goto err_spi_register;
	}
	dev_dbg(dev, "probe succeeded\n");

	/* let runtime pm put suspend */
	if (platform_info->autosuspend_delay > 0) {
		dev_info(&adev->dev,
			"will use autosuspend for runtime pm, delay %dms\n",
			platform_info->autosuspend_delay);
		pm_runtime_set_autosuspend_delay(dev,
			platform_info->autosuspend_delay);
		pm_runtime_use_autosuspend(dev);
	}
	pm_runtime_put(dev);

	return 0;

 err_spi_register:
	if (platform_info->enable_dma)
		pl022_dma_remove(pl022);
 err_no_irq:
	clk_disable_unprepare(pl022->clk);
 err_no_clk_en:
 err_no_clk:
 err_no_ioremap:
	amba_release_regions(adev);
 err_no_ioregion:
	spi_master_put(master);
	return status;
}
2253
/*
 * pl022_remove - AMBA bus remove: reset the block to its default
 * configuration and release DMA, clock, region and tasklet resources.
 */
static void
pl022_remove(struct amba_device *adev)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);

	if (!pl022)
		return;

	/*
	 * undo pm_runtime_put() in probe.  I assume that we're not
	 * accessing the primecell here.
	 */
	pm_runtime_get_noresume(&adev->dev);

	load_ssp_default_config(pl022);
	if (pl022->master_info->enable_dma)
		pl022_dma_remove(pl022);

	clk_disable_unprepare(pl022->clk);
	amba_release_regions(adev);
	tasklet_disable(&pl022->pump_transfers);
}
2276
2277 #ifdef CONFIG_PM_SLEEP
2278 static int pl022_suspend(struct device *dev)
2279 {
2280 struct pl022 *pl022 = dev_get_drvdata(dev);
2281 int ret;
2282
2283 ret = spi_master_suspend(pl022->master);
2284 if (ret)
2285 return ret;
2286
2287 ret = pm_runtime_force_suspend(dev);
2288 if (ret) {
2289 spi_master_resume(pl022->master);
2290 return ret;
2291 }
2292
2293 pinctrl_pm_select_sleep_state(dev);
2294
2295 dev_dbg(dev, "suspended\n");
2296 return 0;
2297 }
2298
2299 static int pl022_resume(struct device *dev)
2300 {
2301 struct pl022 *pl022 = dev_get_drvdata(dev);
2302 int ret;
2303
2304 ret = pm_runtime_force_resume(dev);
2305 if (ret)
2306 dev_err(dev, "problem resuming\n");
2307
2308
2309 ret = spi_master_resume(pl022->master);
2310 if (!ret)
2311 dev_dbg(dev, "resumed\n");
2312
2313 return ret;
2314 }
2315 #endif
2316
2317 #ifdef CONFIG_PM
/*
 * pl022_runtime_suspend - runtime PM: gate the SSP bus clock, then move
 * the pins to their idle state.
 */
static int pl022_runtime_suspend(struct device *dev)
{
	struct pl022 *pl022 = dev_get_drvdata(dev);

	clk_disable_unprepare(pl022->clk);
	pinctrl_pm_select_idle_state(dev);

	return 0;
}
2327
/*
 * pl022_runtime_resume - runtime PM: restore the default pin state, then
 * ungate the SSP bus clock (reverse order of runtime suspend).
 */
static int pl022_runtime_resume(struct device *dev)
{
	struct pl022 *pl022 = dev_get_drvdata(dev);

	pinctrl_pm_select_default_state(dev);
	clk_prepare_enable(pl022->clk);

	return 0;
}
2337 #endif
2338
/* Hook up both the system sleep and the runtime PM callbacks */
static const struct dev_pm_ops pl022_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume)
	SET_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
};
2343
/* Original ARM PL022: 8-deep FIFO, max 16 bits/word, no ST extensions */
static struct vendor_data vendor_arm = {
	.fifodepth = 8,
	.max_bpw = 16,
	.unidir = false,
	.extended_cr = false,
	.pl023 = false,
	.loopback = true,
	.internal_cs_ctrl = false,
};
2353
/* ST Micro derivative: 32-deep FIFO, 32 bits/word, extended registers */
static struct vendor_data vendor_st = {
	.fifodepth = 32,
	.max_bpw = 32,
	.unidir = false,
	.extended_cr = true,
	.pl023 = false,
	.loopback = true,
	.internal_cs_ctrl = false,
};
2363
/* ST "PL023" derivative: like vendor_st but no loopback mode */
static struct vendor_data vendor_st_pl023 = {
	.fifodepth = 32,
	.max_bpw = 32,
	.unidir = false,
	.extended_cr = true,
	.pl023 = true,
	.loopback = false,
	.internal_cs_ctrl = false,
};
2373
/* LSI variant: like the ARM PL022 but with internal chip select control */
static struct vendor_data vendor_lsi = {
	.fifodepth = 8,
	.max_bpw = 16,
	.unidir = false,
	.extended_cr = false,
	.pl023 = false,
	.loopback = true,
	.internal_cs_ctrl = true,
};
2383
/* AMBA/PrimeCell peripheral IDs this driver binds to */
static const struct amba_id pl022_ids[] = {
	{
		/*
		 * ARM PL022 variant, this has a 16bit wide
		 * and 8 locations deep TX/RX FIFO
		 */
		.id = 0x00041022,
		.mask = 0x000fffff,
		.data = &vendor_arm,
	},
	{
		/*
		 * ST Micro derivative, this has 32bit wide
		 * and 32 locations deep TX/RX FIFO
		 */
		.id = 0x01080022,
		.mask = 0xffffffff,
		.data = &vendor_st,
	},
	{
		/*
		 * ST derivative with extended control registers and no
		 * loopback mode (see vendor_st_pl023); presumably the
		 * "PL023" SPI-only variant - confirm against the data
		 * sheet for this periphid.
		 */
		.id = 0x00080023,
		.mask = 0xffffffff,
		.data = &vendor_st_pl023,
	},
	{
		/*
		 * PL022 variant with internal chip select control
		 * (see vendor_lsi.internal_cs_ctrl)
		 */
		.id = 0x000b6022,
		.mask = 0x000fffff,
		.data = &vendor_lsi,
	},
	{ 0, 0 },
};
2426
2427 MODULE_DEVICE_TABLE(amba, pl022_ids);
2428
/* AMBA bus driver glue: probe/remove plus power management hooks */
static struct amba_driver pl022_driver = {
	.drv = {
		.name = "ssp-pl022",
		.pm = &pl022_dev_pm_ops,
	},
	.id_table = pl022_ids,
	.probe = pl022_probe,
	.remove = pl022_remove,
};
2438
static int __init pl022_init(void)
{
	return amba_driver_register(&pl022_driver);
}
/*
 * NOTE(review): registered via subsys_initcall rather than module_init,
 * so the driver comes up early in boot - presumably so devices on this
 * SPI bus can probe during their own init; confirm before changing.
 */
subsys_initcall(pl022_init);
2444
/* Module unload: deregister from the AMBA bus */
static void __exit pl022_exit(void)
{
	amba_driver_unregister(&pl022_driver);
}
module_exit(pl022_exit);
2450
2451 MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
2452 MODULE_DESCRIPTION("PL022 SSP Controller Driver");
2453 MODULE_LICENSE("GPL");