0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/delay.h>
0009 #include <linux/pci.h>
0010 #include <linux/wait.h>
0011 #include <linux/spi/spi.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/sched.h>
0014 #include <linux/spi/spidev.h>
0015 #include <linux/module.h>
0016 #include <linux/device.h>
0017 #include <linux/platform_device.h>
0018
0019 #include <linux/dmaengine.h>
0020 #include <linux/pch_dma.h>
0021
0022
0023 #define PCH_SPCR 0x00
0024 #define PCH_SPBRR 0x04
0025 #define PCH_SPSR 0x08
0026 #define PCH_SPDWR 0x0C
0027 #define PCH_SPDRR 0x10
0028 #define PCH_SSNXCR 0x18
0029 #define PCH_SRST 0x1C
0030 #define PCH_ADDRESS_SIZE 0x20
0031
0032 #define PCH_SPSR_TFD 0x000007C0
0033 #define PCH_SPSR_RFD 0x0000F800
0034
0035 #define PCH_READABLE(x) (((x) & PCH_SPSR_RFD)>>11)
0036 #define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD)>>6)
0037
0038 #define PCH_RX_THOLD 7
0039 #define PCH_RX_THOLD_MAX 15
0040
0041 #define PCH_TX_THOLD 2
0042
0043 #define PCH_MAX_BAUDRATE 5000000
0044 #define PCH_MAX_FIFO_DEPTH 16
0045
0046 #define STATUS_RUNNING 1
0047 #define STATUS_EXITING 2
0048 #define PCH_SLEEP_TIME 10
0049
0050 #define SSN_LOW 0x02U
0051 #define SSN_HIGH 0x03U
0052 #define SSN_NO_CONTROL 0x00U
0053 #define PCH_MAX_CS 0xFF
0054 #define PCI_DEVICE_ID_GE_SPI 0x8816
0055
0056 #define SPCR_SPE_BIT (1 << 0)
0057 #define SPCR_MSTR_BIT (1 << 1)
0058 #define SPCR_LSBF_BIT (1 << 4)
0059 #define SPCR_CPHA_BIT (1 << 5)
0060 #define SPCR_CPOL_BIT (1 << 6)
0061 #define SPCR_TFIE_BIT (1 << 8)
0062 #define SPCR_RFIE_BIT (1 << 9)
0063 #define SPCR_FIE_BIT (1 << 10)
0064 #define SPCR_ORIE_BIT (1 << 11)
0065 #define SPCR_MDFIE_BIT (1 << 12)
0066 #define SPCR_FICLR_BIT (1 << 24)
0067 #define SPSR_TFI_BIT (1 << 0)
0068 #define SPSR_RFI_BIT (1 << 1)
0069 #define SPSR_FI_BIT (1 << 2)
0070 #define SPSR_ORF_BIT (1 << 3)
0071 #define SPBRR_SIZE_BIT (1 << 10)
0072
0073 #define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\
0074 SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
0075
0076 #define SPCR_RFIC_FIELD 20
0077 #define SPCR_TFIC_FIELD 16
0078
0079 #define MASK_SPBRR_SPBR_BITS ((1 << 10) - 1)
0080 #define MASK_RFIC_SPCR_BITS (0xf << SPCR_RFIC_FIELD)
0081 #define MASK_TFIC_SPCR_BITS (0xf << SPCR_TFIC_FIELD)
0082
0083 #define PCH_CLOCK_HZ 50000000
0084 #define PCH_MAX_SPBR 1023
0085
0086
0087 #define PCI_DEVICE_ID_ML7213_SPI 0x802c
0088 #define PCI_DEVICE_ID_ML7223_SPI 0x800F
0089 #define PCI_DEVICE_ID_ML7831_SPI 0x8816
0090
0091
0092
0093
0094
0095
0096
0097
0098 #define PCH_SPI_MAX_DEV 2
0099
0100 #define PCH_BUF_SIZE 4096
0101 #define PCH_DMA_TRANS_SIZE 12
0102
0103 static int use_dma = 1;
0104
0105 struct pch_spi_dma_ctrl {
0106 struct pci_dev *dma_dev;
0107 struct dma_async_tx_descriptor *desc_tx;
0108 struct dma_async_tx_descriptor *desc_rx;
0109 struct pch_dma_slave param_tx;
0110 struct pch_dma_slave param_rx;
0111 struct dma_chan *chan_tx;
0112 struct dma_chan *chan_rx;
0113 struct scatterlist *sg_tx_p;
0114 struct scatterlist *sg_rx_p;
0115 struct scatterlist sg_tx;
0116 struct scatterlist sg_rx;
0117 int nent;
0118 void *tx_buf_virt;
0119 void *rx_buf_virt;
0120 dma_addr_t tx_buf_dma;
0121 dma_addr_t rx_buf_dma;
0122 };
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160
0161 struct pch_spi_data {
0162 void __iomem *io_remap_addr;
0163 unsigned long io_base_addr;
0164 struct spi_master *master;
0165 struct work_struct work;
0166 wait_queue_head_t wait;
0167 u8 transfer_complete;
0168 u8 bcurrent_msg_processing;
0169 spinlock_t lock;
0170 struct list_head queue;
0171 u8 status;
0172 u32 bpw_len;
0173 u8 transfer_active;
0174 u32 tx_index;
0175 u32 rx_index;
0176 u16 *pkt_tx_buff;
0177 u16 *pkt_rx_buff;
0178 u8 n_curnt_chip;
0179 struct spi_device *current_chip;
0180 struct spi_message *current_msg;
0181 struct spi_transfer *cur_trans;
0182 struct pch_spi_board_data *board_dat;
0183 struct platform_device *plat_dev;
0184 int ch;
0185 struct pch_spi_dma_ctrl dma;
0186 int use_dma;
0187 u8 irq_reg_sts;
0188 int save_total_len;
0189 };
0190
0191
0192
0193
0194
0195
0196
0197 struct pch_spi_board_data {
0198 struct pci_dev *pdev;
0199 u8 suspend_sts;
0200 int num;
0201 };
0202
0203 struct pch_pd_dev_save {
0204 int num;
0205 struct platform_device *pd_save[PCH_SPI_MAX_DEV];
0206 struct pch_spi_board_data *board_dat;
0207 };
0208
0209 static const struct pci_device_id pch_spi_pcidev_id[] = {
0210 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
0211 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
0212 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
0213 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_SPI), 1, },
0214 { }
0215 };
0216
0217
0218
0219
0220
0221
0222
0223 static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val)
0224 {
0225 struct pch_spi_data *data = spi_master_get_devdata(master);
0226 iowrite32(val, (data->io_remap_addr + idx));
0227 }
0228
0229
0230
0231
0232
0233
0234 static inline u32 pch_spi_readreg(struct spi_master *master, int idx)
0235 {
0236 struct pch_spi_data *data = spi_master_get_devdata(master);
0237 return ioread32(data->io_remap_addr + idx);
0238 }
0239
0240 static inline void pch_spi_setclr_reg(struct spi_master *master, int idx,
0241 u32 set, u32 clr)
0242 {
0243 u32 tmp = pch_spi_readreg(master, idx);
0244 tmp = (tmp & ~clr) | set;
0245 pch_spi_writereg(master, idx, tmp);
0246 }
0247
0248 static void pch_spi_set_master_mode(struct spi_master *master)
0249 {
0250 pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0);
0251 }
0252
0253
0254
0255
0256
0257 static void pch_spi_clear_fifo(struct spi_master *master)
0258 {
0259 pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0);
0260 pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT);
0261 }
0262
0263 static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
0264 void __iomem *io_remap_addr)
0265 {
0266 u32 n_read, tx_index, rx_index, bpw_len;
0267 u16 *pkt_rx_buffer, *pkt_tx_buff;
0268 int read_cnt;
0269 u32 reg_spcr_val;
0270 void __iomem *spsr;
0271 void __iomem *spdrr;
0272 void __iomem *spdwr;
0273
0274 spsr = io_remap_addr + PCH_SPSR;
0275 iowrite32(reg_spsr_val, spsr);
0276
0277 if (data->transfer_active) {
0278 rx_index = data->rx_index;
0279 tx_index = data->tx_index;
0280 bpw_len = data->bpw_len;
0281 pkt_rx_buffer = data->pkt_rx_buff;
0282 pkt_tx_buff = data->pkt_tx_buff;
0283
0284 spdrr = io_remap_addr + PCH_SPDRR;
0285 spdwr = io_remap_addr + PCH_SPDWR;
0286
0287 n_read = PCH_READABLE(reg_spsr_val);
0288
0289 for (read_cnt = 0; (read_cnt < n_read); read_cnt++) {
0290 pkt_rx_buffer[rx_index++] = ioread32(spdrr);
0291 if (tx_index < bpw_len)
0292 iowrite32(pkt_tx_buff[tx_index++], spdwr);
0293 }
0294
0295
0296 if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) {
0297 reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR);
0298 reg_spcr_val &= ~SPCR_RFIE_BIT;
0299
0300
0301 reg_spcr_val &= ~MASK_RFIC_SPCR_BITS;
0302 reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);
0303
0304 iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR));
0305 }
0306
0307
0308 data->tx_index = tx_index;
0309 data->rx_index = rx_index;
0310
0311
0312 if (reg_spsr_val & SPSR_FI_BIT) {
0313 if ((tx_index == bpw_len) && (rx_index == tx_index)) {
0314
0315 pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
0316 PCH_ALL);
0317
0318
0319
0320 data->transfer_complete = true;
0321 data->transfer_active = false;
0322 wake_up(&data->wait);
0323 } else {
0324 dev_vdbg(&data->master->dev,
0325 "%s : Transfer is not completed",
0326 __func__);
0327 }
0328 }
0329 }
0330 }
0331
0332
0333
0334
0335
0336
0337 static irqreturn_t pch_spi_handler(int irq, void *dev_id)
0338 {
0339 u32 reg_spsr_val;
0340 void __iomem *spsr;
0341 void __iomem *io_remap_addr;
0342 irqreturn_t ret = IRQ_NONE;
0343 struct pch_spi_data *data = dev_id;
0344 struct pch_spi_board_data *board_dat = data->board_dat;
0345
0346 if (board_dat->suspend_sts) {
0347 dev_dbg(&board_dat->pdev->dev,
0348 "%s returning due to suspend\n", __func__);
0349 return IRQ_NONE;
0350 }
0351
0352 io_remap_addr = data->io_remap_addr;
0353 spsr = io_remap_addr + PCH_SPSR;
0354
0355 reg_spsr_val = ioread32(spsr);
0356
0357 if (reg_spsr_val & SPSR_ORF_BIT) {
0358 dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
0359 if (data->current_msg->complete) {
0360 data->transfer_complete = true;
0361 data->current_msg->status = -EIO;
0362 data->current_msg->complete(data->current_msg->context);
0363 data->bcurrent_msg_processing = false;
0364 data->current_msg = NULL;
0365 data->cur_trans = NULL;
0366 }
0367 }
0368
0369 if (data->use_dma)
0370 return IRQ_NONE;
0371
0372
0373 if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
0374 pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
0375 ret = IRQ_HANDLED;
0376 }
0377
0378 dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n",
0379 __func__, ret);
0380
0381 return ret;
0382 }
0383
0384
0385
0386
0387
0388
0389 static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
0390 {
0391 u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);
0392
0393
0394 if (n_spbr > PCH_MAX_SPBR)
0395 n_spbr = PCH_MAX_SPBR;
0396
0397 pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
0398 }
0399
0400
0401
0402
0403
0404
0405 static void pch_spi_set_bits_per_word(struct spi_master *master,
0406 u8 bits_per_word)
0407 {
0408 if (bits_per_word == 8)
0409 pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
0410 else
0411 pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
0412 }
0413
0414
0415
0416
0417
0418 static void pch_spi_setup_transfer(struct spi_device *spi)
0419 {
0420 u32 flags = 0;
0421
0422 dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n",
0423 __func__, pch_spi_readreg(spi->master, PCH_SPBRR),
0424 spi->max_speed_hz);
0425 pch_spi_set_baud_rate(spi->master, spi->max_speed_hz);
0426
0427
0428 pch_spi_set_bits_per_word(spi->master, spi->bits_per_word);
0429
0430 if (!(spi->mode & SPI_LSB_FIRST))
0431 flags |= SPCR_LSBF_BIT;
0432 if (spi->mode & SPI_CPOL)
0433 flags |= SPCR_CPOL_BIT;
0434 if (spi->mode & SPI_CPHA)
0435 flags |= SPCR_CPHA_BIT;
0436 pch_spi_setclr_reg(spi->master, PCH_SPCR, flags,
0437 (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT));
0438
0439
0440 pch_spi_clear_fifo(spi->master);
0441 }
0442
0443
0444
0445
0446
0447 static void pch_spi_reset(struct spi_master *master)
0448 {
0449
0450 pch_spi_writereg(master, PCH_SRST, 0x1);
0451
0452
0453 pch_spi_writereg(master, PCH_SRST, 0x0);
0454 }
0455
0456 static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
0457 {
0458 struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
0459 int retval;
0460 unsigned long flags;
0461
0462
0463 if (data->status == STATUS_EXITING) {
0464 dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
0465 retval = -ESHUTDOWN;
0466 goto err_out;
0467 }
0468
0469
0470 if (data->board_dat->suspend_sts) {
0471 dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
0472 retval = -EINVAL;
0473 goto err_out;
0474 }
0475
0476
0477 pmsg->actual_length = 0;
0478 dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);
0479
0480 pmsg->status = -EINPROGRESS;
0481 spin_lock_irqsave(&data->lock, flags);
0482
0483 list_add_tail(&pmsg->queue, &data->queue);
0484 spin_unlock_irqrestore(&data->lock, flags);
0485
0486 dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);
0487
0488 schedule_work(&data->work);
0489 dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__);
0490
0491 retval = 0;
0492
0493 err_out:
0494 dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
0495 return retval;
0496 }
0497
0498 static inline void pch_spi_select_chip(struct pch_spi_data *data,
0499 struct spi_device *pspi)
0500 {
0501 if (data->current_chip != NULL) {
0502 if (pspi->chip_select != data->n_curnt_chip) {
0503 dev_dbg(&pspi->dev, "%s : different slave\n", __func__);
0504 data->current_chip = NULL;
0505 }
0506 }
0507
0508 data->current_chip = pspi;
0509
0510 data->n_curnt_chip = data->current_chip->chip_select;
0511
0512 dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__);
0513 pch_spi_setup_transfer(pspi);
0514 }
0515
0516 static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
0517 {
0518 int size;
0519 u32 n_writes;
0520 int j;
0521 struct spi_message *pmsg, *tmp;
0522 const u8 *tx_buf;
0523 const u16 *tx_sbuf;
0524
0525
0526 if (data->cur_trans->speed_hz) {
0527 dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
0528 pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
0529 }
0530
0531
0532 if (data->cur_trans->bits_per_word &&
0533 (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) {
0534 dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
0535 pch_spi_set_bits_per_word(data->master,
0536 data->cur_trans->bits_per_word);
0537 *bpw = data->cur_trans->bits_per_word;
0538 } else {
0539 *bpw = data->current_msg->spi->bits_per_word;
0540 }
0541
0542
0543 data->tx_index = 0;
0544 data->rx_index = 0;
0545
0546 data->bpw_len = data->cur_trans->len / (*bpw / 8);
0547
0548
0549 size = data->cur_trans->len * sizeof(*data->pkt_tx_buff);
0550
0551
0552 data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
0553 if (data->pkt_tx_buff != NULL) {
0554 data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
0555 if (!data->pkt_rx_buff) {
0556 kfree(data->pkt_tx_buff);
0557 data->pkt_tx_buff = NULL;
0558 }
0559 }
0560
0561 if (!data->pkt_rx_buff) {
0562
0563 list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
0564 pmsg->status = -ENOMEM;
0565
0566 if (pmsg->complete)
0567 pmsg->complete(pmsg->context);
0568
0569
0570 list_del_init(&pmsg->queue);
0571 }
0572 return;
0573 }
0574
0575
0576 if (data->cur_trans->tx_buf != NULL) {
0577 if (*bpw == 8) {
0578 tx_buf = data->cur_trans->tx_buf;
0579 for (j = 0; j < data->bpw_len; j++)
0580 data->pkt_tx_buff[j] = *tx_buf++;
0581 } else {
0582 tx_sbuf = data->cur_trans->tx_buf;
0583 for (j = 0; j < data->bpw_len; j++)
0584 data->pkt_tx_buff[j] = *tx_sbuf++;
0585 }
0586 }
0587
0588
0589 n_writes = data->bpw_len;
0590 if (n_writes > PCH_MAX_FIFO_DEPTH)
0591 n_writes = PCH_MAX_FIFO_DEPTH;
0592
0593 dev_dbg(&data->master->dev,
0594 "\n%s:Pulling down SSN low - writing 0x2 to SSNXCR\n",
0595 __func__);
0596 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
0597
0598 for (j = 0; j < n_writes; j++)
0599 pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);
0600
0601
0602 data->tx_index = j;
0603
0604
0605 data->transfer_complete = false;
0606 data->transfer_active = true;
0607 }
0608
0609 static void pch_spi_nomore_transfer(struct pch_spi_data *data)
0610 {
0611 struct spi_message *pmsg, *tmp;
0612 dev_dbg(&data->master->dev, "%s called\n", __func__);
0613
0614
0615 data->current_msg->status = 0;
0616
0617 if (data->current_msg->complete) {
0618 dev_dbg(&data->master->dev,
0619 "%s:Invoking callback of SPI core\n", __func__);
0620 data->current_msg->complete(data->current_msg->context);
0621 }
0622
0623
0624 data->bcurrent_msg_processing = false;
0625
0626 dev_dbg(&data->master->dev,
0627 "%s:data->bcurrent_msg_processing = false\n", __func__);
0628
0629 data->current_msg = NULL;
0630 data->cur_trans = NULL;
0631
0632
0633
0634 if ((list_empty(&data->queue) == 0) &&
0635 (!data->board_dat->suspend_sts) &&
0636 (data->status != STATUS_EXITING)) {
0637
0638
0639
0640
0641 dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
0642 schedule_work(&data->work);
0643 } else if (data->board_dat->suspend_sts ||
0644 data->status == STATUS_EXITING) {
0645 dev_dbg(&data->master->dev,
0646 "%s suspend/remove initiated, flushing queue\n",
0647 __func__);
0648 list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
0649 pmsg->status = -EIO;
0650
0651 if (pmsg->complete)
0652 pmsg->complete(pmsg->context);
0653
0654
0655 list_del_init(&pmsg->queue);
0656 }
0657 }
0658 }
0659
0660 static void pch_spi_set_ir(struct pch_spi_data *data)
0661 {
0662
0663 if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
0664
0665 pch_spi_setclr_reg(data->master, PCH_SPCR,
0666 PCH_RX_THOLD << SPCR_RFIC_FIELD |
0667 SPCR_FIE_BIT | SPCR_RFIE_BIT |
0668 SPCR_ORIE_BIT | SPCR_SPE_BIT,
0669 MASK_RFIC_SPCR_BITS | PCH_ALL);
0670 else
0671
0672 pch_spi_setclr_reg(data->master, PCH_SPCR,
0673 PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
0674 SPCR_FIE_BIT | SPCR_ORIE_BIT |
0675 SPCR_SPE_BIT,
0676 MASK_RFIC_SPCR_BITS | PCH_ALL);
0677
0678
0679
0680 dev_dbg(&data->master->dev,
0681 "%s:waiting for transfer to get over\n", __func__);
0682
0683 wait_event_interruptible(data->wait, data->transfer_complete);
0684
0685
0686 pch_spi_writereg(data->master, PCH_SPSR,
0687 pch_spi_readreg(data->master, PCH_SPSR));
0688
0689 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
0690
0691 pch_spi_clear_fifo(data->master);
0692 }
0693
0694 static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
0695 {
0696 int j;
0697 u8 *rx_buf;
0698 u16 *rx_sbuf;
0699
0700
0701 if (!data->cur_trans->rx_buf)
0702 return;
0703
0704 if (bpw == 8) {
0705 rx_buf = data->cur_trans->rx_buf;
0706 for (j = 0; j < data->bpw_len; j++)
0707 *rx_buf++ = data->pkt_rx_buff[j] & 0xFF;
0708 } else {
0709 rx_sbuf = data->cur_trans->rx_buf;
0710 for (j = 0; j < data->bpw_len; j++)
0711 *rx_sbuf++ = data->pkt_rx_buff[j];
0712 }
0713 }
0714
0715 static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
0716 {
0717 int j;
0718 u8 *rx_buf;
0719 u16 *rx_sbuf;
0720 const u8 *rx_dma_buf;
0721 const u16 *rx_dma_sbuf;
0722
0723
0724 if (!data->cur_trans->rx_buf)
0725 return;
0726
0727 if (bpw == 8) {
0728 rx_buf = data->cur_trans->rx_buf;
0729 rx_dma_buf = data->dma.rx_buf_virt;
0730 for (j = 0; j < data->bpw_len; j++)
0731 *rx_buf++ = *rx_dma_buf++ & 0xFF;
0732 data->cur_trans->rx_buf = rx_buf;
0733 } else {
0734 rx_sbuf = data->cur_trans->rx_buf;
0735 rx_dma_sbuf = data->dma.rx_buf_virt;
0736 for (j = 0; j < data->bpw_len; j++)
0737 *rx_sbuf++ = *rx_dma_sbuf++;
0738 data->cur_trans->rx_buf = rx_sbuf;
0739 }
0740 }
0741
0742 static int pch_spi_start_transfer(struct pch_spi_data *data)
0743 {
0744 struct pch_spi_dma_ctrl *dma;
0745 unsigned long flags;
0746 int rtn;
0747
0748 dma = &data->dma;
0749
0750 spin_lock_irqsave(&data->lock, flags);
0751
0752
0753 pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
0754
0755 spin_unlock_irqrestore(&data->lock, flags);
0756
0757
0758
0759 dev_dbg(&data->master->dev,
0760 "%s:waiting for transfer to get over\n", __func__);
0761 rtn = wait_event_interruptible_timeout(data->wait,
0762 data->transfer_complete,
0763 msecs_to_jiffies(2 * HZ));
0764 if (!rtn)
0765 dev_err(&data->master->dev,
0766 "%s wait-event timeout\n", __func__);
0767
0768 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
0769 DMA_FROM_DEVICE);
0770
0771 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
0772 DMA_FROM_DEVICE);
0773 memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
0774
0775 async_tx_ack(dma->desc_rx);
0776 async_tx_ack(dma->desc_tx);
0777 kfree(dma->sg_tx_p);
0778 kfree(dma->sg_rx_p);
0779
0780 spin_lock_irqsave(&data->lock, flags);
0781
0782
0783 pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
0784 MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
0785 SPCR_SPE_BIT);
0786
0787 pch_spi_writereg(data->master, PCH_SPSR,
0788 pch_spi_readreg(data->master, PCH_SPSR));
0789
0790 pch_spi_clear_fifo(data->master);
0791
0792 spin_unlock_irqrestore(&data->lock, flags);
0793
0794 return rtn;
0795 }
0796
0797 static void pch_dma_rx_complete(void *arg)
0798 {
0799 struct pch_spi_data *data = arg;
0800
0801
0802 data->transfer_complete = true;
0803 wake_up_interruptible(&data->wait);
0804 }
0805
0806 static bool pch_spi_filter(struct dma_chan *chan, void *slave)
0807 {
0808 struct pch_dma_slave *param = slave;
0809
0810 if ((chan->chan_id == param->chan_id) &&
0811 (param->dma_dev == chan->device->dev)) {
0812 chan->private = param;
0813 return true;
0814 } else {
0815 return false;
0816 }
0817 }
0818
0819 static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
0820 {
0821 dma_cap_mask_t mask;
0822 struct dma_chan *chan;
0823 struct pci_dev *dma_dev;
0824 struct pch_dma_slave *param;
0825 struct pch_spi_dma_ctrl *dma;
0826 unsigned int width;
0827
0828 if (bpw == 8)
0829 width = PCH_DMA_WIDTH_1_BYTE;
0830 else
0831 width = PCH_DMA_WIDTH_2_BYTES;
0832
0833 dma = &data->dma;
0834 dma_cap_zero(mask);
0835 dma_cap_set(DMA_SLAVE, mask);
0836
0837
0838 dma_dev = pci_get_slot(data->board_dat->pdev->bus,
0839 PCI_DEVFN(PCI_SLOT(data->board_dat->pdev->devfn), 0));
0840
0841
0842 param = &dma->param_tx;
0843 param->dma_dev = &dma_dev->dev;
0844 param->chan_id = data->ch * 2;
0845 param->tx_reg = data->io_base_addr + PCH_SPDWR;
0846 param->width = width;
0847 chan = dma_request_channel(mask, pch_spi_filter, param);
0848 if (!chan) {
0849 dev_err(&data->master->dev,
0850 "ERROR: dma_request_channel FAILS(Tx)\n");
0851 goto out;
0852 }
0853 dma->chan_tx = chan;
0854
0855
0856 param = &dma->param_rx;
0857 param->dma_dev = &dma_dev->dev;
0858 param->chan_id = data->ch * 2 + 1;
0859 param->rx_reg = data->io_base_addr + PCH_SPDRR;
0860 param->width = width;
0861 chan = dma_request_channel(mask, pch_spi_filter, param);
0862 if (!chan) {
0863 dev_err(&data->master->dev,
0864 "ERROR: dma_request_channel FAILS(Rx)\n");
0865 dma_release_channel(dma->chan_tx);
0866 dma->chan_tx = NULL;
0867 goto out;
0868 }
0869 dma->chan_rx = chan;
0870
0871 dma->dma_dev = dma_dev;
0872 return;
0873 out:
0874 pci_dev_put(dma_dev);
0875 data->use_dma = 0;
0876 }
0877
0878 static void pch_spi_release_dma(struct pch_spi_data *data)
0879 {
0880 struct pch_spi_dma_ctrl *dma;
0881
0882 dma = &data->dma;
0883 if (dma->chan_tx) {
0884 dma_release_channel(dma->chan_tx);
0885 dma->chan_tx = NULL;
0886 }
0887 if (dma->chan_rx) {
0888 dma_release_channel(dma->chan_rx);
0889 dma->chan_rx = NULL;
0890 }
0891
0892 pci_dev_put(dma->dma_dev);
0893 }
0894
0895 static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
0896 {
0897 const u8 *tx_buf;
0898 const u16 *tx_sbuf;
0899 u8 *tx_dma_buf;
0900 u16 *tx_dma_sbuf;
0901 struct scatterlist *sg;
0902 struct dma_async_tx_descriptor *desc_tx;
0903 struct dma_async_tx_descriptor *desc_rx;
0904 int num;
0905 int i;
0906 int size;
0907 int rem;
0908 int head;
0909 unsigned long flags;
0910 struct pch_spi_dma_ctrl *dma;
0911
0912 dma = &data->dma;
0913
0914
0915 if (data->cur_trans->speed_hz) {
0916 dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
0917 spin_lock_irqsave(&data->lock, flags);
0918 pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
0919 spin_unlock_irqrestore(&data->lock, flags);
0920 }
0921
0922
0923 if (data->cur_trans->bits_per_word &&
0924 (data->current_msg->spi->bits_per_word !=
0925 data->cur_trans->bits_per_word)) {
0926 dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
0927 spin_lock_irqsave(&data->lock, flags);
0928 pch_spi_set_bits_per_word(data->master,
0929 data->cur_trans->bits_per_word);
0930 spin_unlock_irqrestore(&data->lock, flags);
0931 *bpw = data->cur_trans->bits_per_word;
0932 } else {
0933 *bpw = data->current_msg->spi->bits_per_word;
0934 }
0935 data->bpw_len = data->cur_trans->len / (*bpw / 8);
0936
0937 if (data->bpw_len > PCH_BUF_SIZE) {
0938 data->bpw_len = PCH_BUF_SIZE;
0939 data->cur_trans->len -= PCH_BUF_SIZE;
0940 }
0941
0942
0943 if (data->cur_trans->tx_buf != NULL) {
0944 if (*bpw == 8) {
0945 tx_buf = data->cur_trans->tx_buf;
0946 tx_dma_buf = dma->tx_buf_virt;
0947 for (i = 0; i < data->bpw_len; i++)
0948 *tx_dma_buf++ = *tx_buf++;
0949 } else {
0950 tx_sbuf = data->cur_trans->tx_buf;
0951 tx_dma_sbuf = dma->tx_buf_virt;
0952 for (i = 0; i < data->bpw_len; i++)
0953 *tx_dma_sbuf++ = *tx_sbuf++;
0954 }
0955 }
0956
0957
0958 if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
0959 if (data->bpw_len % PCH_DMA_TRANS_SIZE) {
0960 num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
0961 rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
0962 } else {
0963 num = data->bpw_len / PCH_DMA_TRANS_SIZE;
0964 rem = PCH_DMA_TRANS_SIZE;
0965 }
0966 size = PCH_DMA_TRANS_SIZE;
0967 } else {
0968 num = 1;
0969 size = data->bpw_len;
0970 rem = data->bpw_len;
0971 }
0972 dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
0973 __func__, num, size, rem);
0974 spin_lock_irqsave(&data->lock, flags);
0975
0976
0977 pch_spi_setclr_reg(data->master, PCH_SPCR,
0978 ((size - 1) << SPCR_RFIC_FIELD) |
0979 (PCH_TX_THOLD << SPCR_TFIC_FIELD),
0980 MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
0981
0982 spin_unlock_irqrestore(&data->lock, flags);
0983
0984
0985 dma->sg_rx_p = kmalloc_array(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC);
0986 if (!dma->sg_rx_p)
0987 return;
0988
0989 sg_init_table(dma->sg_rx_p, num);
0990
0991 sg = dma->sg_rx_p;
0992 for (i = 0; i < num; i++, sg++) {
0993 if (i == (num - 2)) {
0994 sg->offset = size * i;
0995 sg->offset = sg->offset * (*bpw / 8);
0996 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
0997 sg->offset);
0998 sg_dma_len(sg) = rem;
0999 } else if (i == (num - 1)) {
1000 sg->offset = size * (i - 1) + rem;
1001 sg->offset = sg->offset * (*bpw / 8);
1002 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
1003 sg->offset);
1004 sg_dma_len(sg) = size;
1005 } else {
1006 sg->offset = size * i;
1007 sg->offset = sg->offset * (*bpw / 8);
1008 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
1009 sg->offset);
1010 sg_dma_len(sg) = size;
1011 }
1012 sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
1013 }
1014 sg = dma->sg_rx_p;
1015 desc_rx = dmaengine_prep_slave_sg(dma->chan_rx, sg,
1016 num, DMA_DEV_TO_MEM,
1017 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1018 if (!desc_rx) {
1019 dev_err(&data->master->dev,
1020 "%s:dmaengine_prep_slave_sg Failed\n", __func__);
1021 return;
1022 }
1023 dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
1024 desc_rx->callback = pch_dma_rx_complete;
1025 desc_rx->callback_param = data;
1026 dma->nent = num;
1027 dma->desc_rx = desc_rx;
1028
1029
1030 if (data->bpw_len > PCH_MAX_FIFO_DEPTH) {
1031 head = PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE;
1032 if (data->bpw_len % PCH_DMA_TRANS_SIZE > 4) {
1033 num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
1034 rem = data->bpw_len % PCH_DMA_TRANS_SIZE - head;
1035 } else {
1036 num = data->bpw_len / PCH_DMA_TRANS_SIZE;
1037 rem = data->bpw_len % PCH_DMA_TRANS_SIZE +
1038 PCH_DMA_TRANS_SIZE - head;
1039 }
1040 size = PCH_DMA_TRANS_SIZE;
1041 } else {
1042 num = 1;
1043 size = data->bpw_len;
1044 rem = data->bpw_len;
1045 head = 0;
1046 }
1047
1048 dma->sg_tx_p = kmalloc_array(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC);
1049 if (!dma->sg_tx_p)
1050 return;
1051
1052 sg_init_table(dma->sg_tx_p, num);
1053
1054 sg = dma->sg_tx_p;
1055 for (i = 0; i < num; i++, sg++) {
1056 if (i == 0) {
1057 sg->offset = 0;
1058 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size + head,
1059 sg->offset);
1060 sg_dma_len(sg) = size + head;
1061 } else if (i == (num - 1)) {
1062 sg->offset = head + size * i;
1063 sg->offset = sg->offset * (*bpw / 8);
1064 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
1065 sg->offset);
1066 sg_dma_len(sg) = rem;
1067 } else {
1068 sg->offset = head + size * i;
1069 sg->offset = sg->offset * (*bpw / 8);
1070 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
1071 sg->offset);
1072 sg_dma_len(sg) = size;
1073 }
1074 sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
1075 }
1076 sg = dma->sg_tx_p;
1077 desc_tx = dmaengine_prep_slave_sg(dma->chan_tx,
1078 sg, num, DMA_MEM_TO_DEV,
1079 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1080 if (!desc_tx) {
1081 dev_err(&data->master->dev,
1082 "%s:dmaengine_prep_slave_sg Failed\n", __func__);
1083 return;
1084 }
1085 dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
1086 desc_tx->callback = NULL;
1087 desc_tx->callback_param = data;
1088 dma->nent = num;
1089 dma->desc_tx = desc_tx;
1090
1091 dev_dbg(&data->master->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);
1092
1093 spin_lock_irqsave(&data->lock, flags);
1094 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
1095 desc_rx->tx_submit(desc_rx);
1096 desc_tx->tx_submit(desc_tx);
1097 spin_unlock_irqrestore(&data->lock, flags);
1098
1099
1100 data->transfer_complete = false;
1101 }
1102
1103 static void pch_spi_process_messages(struct work_struct *pwork)
1104 {
1105 struct spi_message *pmsg, *tmp;
1106 struct pch_spi_data *data;
1107 int bpw;
1108
1109 data = container_of(pwork, struct pch_spi_data, work);
1110 dev_dbg(&data->master->dev, "%s data initialized\n", __func__);
1111
1112 spin_lock(&data->lock);
1113
1114 if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
1115 dev_dbg(&data->master->dev,
1116 "%s suspend/remove initiated, flushing queue\n", __func__);
1117 list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
1118 pmsg->status = -EIO;
1119
1120 if (pmsg->complete) {
1121 spin_unlock(&data->lock);
1122 pmsg->complete(pmsg->context);
1123 spin_lock(&data->lock);
1124 }
1125
1126
1127 list_del_init(&pmsg->queue);
1128 }
1129
1130 spin_unlock(&data->lock);
1131 return;
1132 }
1133
1134 data->bcurrent_msg_processing = true;
1135 dev_dbg(&data->master->dev,
1136 "%s Set data->bcurrent_msg_processing= true\n", __func__);
1137
1138
1139 data->current_msg = list_entry(data->queue.next, struct spi_message,
1140 queue);
1141
1142 list_del_init(&data->current_msg->queue);
1143
1144 data->current_msg->status = 0;
1145
1146 pch_spi_select_chip(data, data->current_msg->spi);
1147
1148 spin_unlock(&data->lock);
1149
1150 if (data->use_dma)
1151 pch_spi_request_dma(data,
1152 data->current_msg->spi->bits_per_word);
1153 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
1154 do {
1155 int cnt;
1156
1157
1158
1159 spin_lock(&data->lock);
1160 if (data->cur_trans == NULL) {
1161 data->cur_trans =
1162 list_entry(data->current_msg->transfers.next,
1163 struct spi_transfer, transfer_list);
1164 dev_dbg(&data->master->dev,
1165 "%s :Getting 1st transfer message\n",
1166 __func__);
1167 } else {
1168 data->cur_trans =
1169 list_entry(data->cur_trans->transfer_list.next,
1170 struct spi_transfer, transfer_list);
1171 dev_dbg(&data->master->dev,
1172 "%s :Getting next transfer message\n",
1173 __func__);
1174 }
1175 spin_unlock(&data->lock);
1176
1177 if (!data->cur_trans->len)
1178 goto out;
1179 cnt = (data->cur_trans->len - 1) / PCH_BUF_SIZE + 1;
1180 data->save_total_len = data->cur_trans->len;
1181 if (data->use_dma) {
1182 int i;
1183 char *save_rx_buf = data->cur_trans->rx_buf;
1184
1185 for (i = 0; i < cnt; i++) {
1186 pch_spi_handle_dma(data, &bpw);
1187 if (!pch_spi_start_transfer(data)) {
1188 data->transfer_complete = true;
1189 data->current_msg->status = -EIO;
1190 data->current_msg->complete
1191 (data->current_msg->context);
1192 data->bcurrent_msg_processing = false;
1193 data->current_msg = NULL;
1194 data->cur_trans = NULL;
1195 goto out;
1196 }
1197 pch_spi_copy_rx_data_for_dma(data, bpw);
1198 }
1199 data->cur_trans->rx_buf = save_rx_buf;
1200 } else {
1201 pch_spi_set_tx(data, &bpw);
1202 pch_spi_set_ir(data);
1203 pch_spi_copy_rx_data(data, bpw);
1204 kfree(data->pkt_rx_buff);
1205 data->pkt_rx_buff = NULL;
1206 kfree(data->pkt_tx_buff);
1207 data->pkt_tx_buff = NULL;
1208 }
1209
1210 data->cur_trans->len = data->save_total_len;
1211 data->current_msg->actual_length += data->cur_trans->len;
1212
1213 dev_dbg(&data->master->dev,
1214 "%s:data->current_msg->actual_length=%d\n",
1215 __func__, data->current_msg->actual_length);
1216
1217 spi_transfer_delay_exec(data->cur_trans);
1218
1219 spin_lock(&data->lock);
1220
1221
1222 if ((data->cur_trans->transfer_list.next) ==
1223 &(data->current_msg->transfers)) {
1224 pch_spi_nomore_transfer(data);
1225 }
1226
1227 spin_unlock(&data->lock);
1228
1229 } while (data->cur_trans != NULL);
1230
1231 out:
1232 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
1233 if (data->use_dma)
1234 pch_spi_release_dma(data);
1235 }
1236
1237 static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
1238 struct pch_spi_data *data)
1239 {
1240 dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
1241
1242 flush_work(&data->work);
1243 }
1244
1245 static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
1246 struct pch_spi_data *data)
1247 {
1248 dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
1249
1250
1251 pch_spi_reset(data->master);
1252 dev_dbg(&board_dat->pdev->dev,
1253 "%s pch_spi_reset invoked successfully\n", __func__);
1254
1255 dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
1256
1257 return 0;
1258 }
1259
1260 static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
1261 struct pch_spi_data *data)
1262 {
1263 struct pch_spi_dma_ctrl *dma;
1264
1265 dma = &data->dma;
1266 if (dma->tx_buf_dma)
1267 dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
1268 dma->tx_buf_virt, dma->tx_buf_dma);
1269 if (dma->rx_buf_dma)
1270 dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
1271 dma->rx_buf_virt, dma->rx_buf_dma);
1272 }
1273
1274 static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
1275 struct pch_spi_data *data)
1276 {
1277 struct pch_spi_dma_ctrl *dma;
1278 int ret;
1279
1280 dma = &data->dma;
1281 ret = 0;
1282
1283 dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
1284 PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
1285 if (!dma->tx_buf_virt)
1286 ret = -ENOMEM;
1287
1288
1289 dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
1290 PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
1291 if (!dma->rx_buf_virt)
1292 ret = -ENOMEM;
1293
1294 return ret;
1295 }
1296
1297 static int pch_spi_pd_probe(struct platform_device *plat_dev)
1298 {
1299 int ret;
1300 struct spi_master *master;
1301 struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
1302 struct pch_spi_data *data;
1303
1304 dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);
1305
1306 master = spi_alloc_master(&board_dat->pdev->dev,
1307 sizeof(struct pch_spi_data));
1308 if (!master) {
1309 dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
1310 plat_dev->id);
1311 return -ENOMEM;
1312 }
1313
1314 data = spi_master_get_devdata(master);
1315 data->master = master;
1316
1317 platform_set_drvdata(plat_dev, data);
1318
1319
1320 data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
1321 PCH_ADDRESS_SIZE * plat_dev->id;
1322 data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
1323 if (!data->io_remap_addr) {
1324 dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
1325 ret = -ENOMEM;
1326 goto err_pci_iomap;
1327 }
1328 data->io_remap_addr += PCH_ADDRESS_SIZE * plat_dev->id;
1329
1330 dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
1331 plat_dev->id, data->io_remap_addr);
1332
1333
1334 master->num_chipselect = PCH_MAX_CS;
1335 master->transfer = pch_spi_transfer;
1336 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
1337 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
1338 master->max_speed_hz = PCH_MAX_BAUDRATE;
1339 master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
1340
1341 data->board_dat = board_dat;
1342 data->plat_dev = plat_dev;
1343 data->n_curnt_chip = 255;
1344 data->status = STATUS_RUNNING;
1345 data->ch = plat_dev->id;
1346 data->use_dma = use_dma;
1347
1348 INIT_LIST_HEAD(&data->queue);
1349 spin_lock_init(&data->lock);
1350 INIT_WORK(&data->work, pch_spi_process_messages);
1351 init_waitqueue_head(&data->wait);
1352
1353 ret = pch_spi_get_resources(board_dat, data);
1354 if (ret) {
1355 dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret);
1356 goto err_spi_get_resources;
1357 }
1358
1359 ret = request_irq(board_dat->pdev->irq, pch_spi_handler,
1360 IRQF_SHARED, KBUILD_MODNAME, data);
1361 if (ret) {
1362 dev_err(&plat_dev->dev,
1363 "%s request_irq failed\n", __func__);
1364 goto err_request_irq;
1365 }
1366 data->irq_reg_sts = true;
1367
1368 pch_spi_set_master_mode(master);
1369
1370 if (use_dma) {
1371 dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
1372 ret = pch_alloc_dma_buf(board_dat, data);
1373 if (ret)
1374 goto err_spi_register_master;
1375 }
1376
1377 ret = spi_register_master(master);
1378 if (ret != 0) {
1379 dev_err(&plat_dev->dev,
1380 "%s spi_register_master FAILED\n", __func__);
1381 goto err_spi_register_master;
1382 }
1383
1384 return 0;
1385
1386 err_spi_register_master:
1387 pch_free_dma_buf(board_dat, data);
1388 free_irq(board_dat->pdev->irq, data);
1389 err_request_irq:
1390 pch_spi_free_resources(board_dat, data);
1391 err_spi_get_resources:
1392 pci_iounmap(board_dat->pdev, data->io_remap_addr);
1393 err_pci_iomap:
1394 spi_master_put(master);
1395
1396 return ret;
1397 }
1398
1399 static int pch_spi_pd_remove(struct platform_device *plat_dev)
1400 {
1401 struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
1402 struct pch_spi_data *data = platform_get_drvdata(plat_dev);
1403 int count;
1404 unsigned long flags;
1405
1406 dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n",
1407 __func__, plat_dev->id, board_dat->pdev->irq);
1408
1409 if (use_dma)
1410 pch_free_dma_buf(board_dat, data);
1411
1412
1413
1414 count = 500;
1415 spin_lock_irqsave(&data->lock, flags);
1416 data->status = STATUS_EXITING;
1417 while ((list_empty(&data->queue) == 0) && --count) {
1418 dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
1419 __func__);
1420 spin_unlock_irqrestore(&data->lock, flags);
1421 msleep(PCH_SLEEP_TIME);
1422 spin_lock_irqsave(&data->lock, flags);
1423 }
1424 spin_unlock_irqrestore(&data->lock, flags);
1425
1426 pch_spi_free_resources(board_dat, data);
1427
1428 if (data->irq_reg_sts) {
1429
1430 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
1431 data->irq_reg_sts = false;
1432 free_irq(board_dat->pdev->irq, data);
1433 }
1434
1435 pci_iounmap(board_dat->pdev, data->io_remap_addr);
1436 spi_unregister_master(data->master);
1437
1438 return 0;
1439 }
1440 #ifdef CONFIG_PM
1441 static int pch_spi_pd_suspend(struct platform_device *pd_dev,
1442 pm_message_t state)
1443 {
1444 u8 count;
1445 struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
1446 struct pch_spi_data *data = platform_get_drvdata(pd_dev);
1447
1448 dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__);
1449
1450 if (!board_dat) {
1451 dev_err(&pd_dev->dev,
1452 "%s pci_get_drvdata returned NULL\n", __func__);
1453 return -EFAULT;
1454 }
1455
1456
1457
1458 count = 255;
1459 while ((--count) > 0) {
1460 if (!(data->bcurrent_msg_processing))
1461 break;
1462 msleep(PCH_SLEEP_TIME);
1463 }
1464
1465
1466 if (data->irq_reg_sts) {
1467
1468 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
1469 pch_spi_reset(data->master);
1470 free_irq(board_dat->pdev->irq, data);
1471
1472 data->irq_reg_sts = false;
1473 dev_dbg(&pd_dev->dev,
1474 "%s free_irq invoked successfully.\n", __func__);
1475 }
1476
1477 return 0;
1478 }
1479
1480 static int pch_spi_pd_resume(struct platform_device *pd_dev)
1481 {
1482 struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
1483 struct pch_spi_data *data = platform_get_drvdata(pd_dev);
1484 int retval;
1485
1486 if (!board_dat) {
1487 dev_err(&pd_dev->dev,
1488 "%s pci_get_drvdata returned NULL\n", __func__);
1489 return -EFAULT;
1490 }
1491
1492 if (!data->irq_reg_sts) {
1493
1494 retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
1495 IRQF_SHARED, KBUILD_MODNAME, data);
1496 if (retval < 0) {
1497 dev_err(&pd_dev->dev,
1498 "%s request_irq failed\n", __func__);
1499 return retval;
1500 }
1501
1502
1503 pch_spi_reset(data->master);
1504 pch_spi_set_master_mode(data->master);
1505 data->irq_reg_sts = true;
1506 }
1507 return 0;
1508 }
1509 #else
1510 #define pch_spi_pd_suspend NULL
1511 #define pch_spi_pd_resume NULL
1512 #endif
1513
1514 static struct platform_driver pch_spi_pd_driver = {
1515 .driver = {
1516 .name = "pch-spi",
1517 },
1518 .probe = pch_spi_pd_probe,
1519 .remove = pch_spi_pd_remove,
1520 .suspend = pch_spi_pd_suspend,
1521 .resume = pch_spi_pd_resume
1522 };
1523
1524 static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1525 {
1526 struct pch_spi_board_data *board_dat;
1527 struct platform_device *pd_dev = NULL;
1528 int retval;
1529 int i;
1530 struct pch_pd_dev_save *pd_dev_save;
1531
1532 pd_dev_save = kzalloc(sizeof(*pd_dev_save), GFP_KERNEL);
1533 if (!pd_dev_save)
1534 return -ENOMEM;
1535
1536 board_dat = kzalloc(sizeof(*board_dat), GFP_KERNEL);
1537 if (!board_dat) {
1538 retval = -ENOMEM;
1539 goto err_no_mem;
1540 }
1541
1542 retval = pci_request_regions(pdev, KBUILD_MODNAME);
1543 if (retval) {
1544 dev_err(&pdev->dev, "%s request_region failed\n", __func__);
1545 goto pci_request_regions;
1546 }
1547
1548 board_dat->pdev = pdev;
1549 board_dat->num = id->driver_data;
1550 pd_dev_save->num = id->driver_data;
1551 pd_dev_save->board_dat = board_dat;
1552
1553 retval = pci_enable_device(pdev);
1554 if (retval) {
1555 dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__);
1556 goto pci_enable_device;
1557 }
1558
1559 for (i = 0; i < board_dat->num; i++) {
1560 pd_dev = platform_device_alloc("pch-spi", i);
1561 if (!pd_dev) {
1562 dev_err(&pdev->dev, "platform_device_alloc failed\n");
1563 retval = -ENOMEM;
1564 goto err_platform_device;
1565 }
1566 pd_dev_save->pd_save[i] = pd_dev;
1567 pd_dev->dev.parent = &pdev->dev;
1568
1569 retval = platform_device_add_data(pd_dev, board_dat,
1570 sizeof(*board_dat));
1571 if (retval) {
1572 dev_err(&pdev->dev,
1573 "platform_device_add_data failed\n");
1574 platform_device_put(pd_dev);
1575 goto err_platform_device;
1576 }
1577
1578 retval = platform_device_add(pd_dev);
1579 if (retval) {
1580 dev_err(&pdev->dev, "platform_device_add failed\n");
1581 platform_device_put(pd_dev);
1582 goto err_platform_device;
1583 }
1584 }
1585
1586 pci_set_drvdata(pdev, pd_dev_save);
1587
1588 return 0;
1589
1590 err_platform_device:
1591 while (--i >= 0)
1592 platform_device_unregister(pd_dev_save->pd_save[i]);
1593 pci_disable_device(pdev);
1594 pci_enable_device:
1595 pci_release_regions(pdev);
1596 pci_request_regions:
1597 kfree(board_dat);
1598 err_no_mem:
1599 kfree(pd_dev_save);
1600
1601 return retval;
1602 }
1603
1604 static void pch_spi_remove(struct pci_dev *pdev)
1605 {
1606 int i;
1607 struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
1608
1609 dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev);
1610
1611 for (i = 0; i < pd_dev_save->num; i++)
1612 platform_device_unregister(pd_dev_save->pd_save[i]);
1613
1614 pci_disable_device(pdev);
1615 pci_release_regions(pdev);
1616 kfree(pd_dev_save->board_dat);
1617 kfree(pd_dev_save);
1618 }
1619
1620 static int __maybe_unused pch_spi_suspend(struct device *dev)
1621 {
1622 struct pch_pd_dev_save *pd_dev_save = dev_get_drvdata(dev);
1623
1624 dev_dbg(dev, "%s ENTRY\n", __func__);
1625
1626 pd_dev_save->board_dat->suspend_sts = true;
1627
1628 return 0;
1629 }
1630
1631 static int __maybe_unused pch_spi_resume(struct device *dev)
1632 {
1633 struct pch_pd_dev_save *pd_dev_save = dev_get_drvdata(dev);
1634
1635 dev_dbg(dev, "%s ENTRY\n", __func__);
1636
1637
1638 pd_dev_save->board_dat->suspend_sts = false;
1639
1640 return 0;
1641 }
1642
1643 static SIMPLE_DEV_PM_OPS(pch_spi_pm_ops, pch_spi_suspend, pch_spi_resume);
1644
1645 static struct pci_driver pch_spi_pcidev_driver = {
1646 .name = "pch_spi",
1647 .id_table = pch_spi_pcidev_id,
1648 .probe = pch_spi_probe,
1649 .remove = pch_spi_remove,
1650 .driver.pm = &pch_spi_pm_ops,
1651 };
1652
1653 static int __init pch_spi_init(void)
1654 {
1655 int ret;
1656 ret = platform_driver_register(&pch_spi_pd_driver);
1657 if (ret)
1658 return ret;
1659
1660 ret = pci_register_driver(&pch_spi_pcidev_driver);
1661 if (ret) {
1662 platform_driver_unregister(&pch_spi_pd_driver);
1663 return ret;
1664 }
1665
1666 return 0;
1667 }
1668 module_init(pch_spi_init);
1669
1670 static void __exit pch_spi_exit(void)
1671 {
1672 pci_unregister_driver(&pch_spi_pcidev_driver);
1673 platform_driver_unregister(&pch_spi_pd_driver);
1674 }
1675 module_exit(pch_spi_exit);
1676
1677 module_param(use_dma, int, 0644);
1678 MODULE_PARM_DESC(use_dma,
1679 "to use DMA for data transfers pass 1 else 0; default 1");
1680
1681 MODULE_LICENSE("GPL");
1682 MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");
1683 MODULE_DEVICE_TABLE(pci, pch_spi_pcidev_id);
1684