/*
 * Core driver for the Motorola PCAP2 ASIC, accessed over SPI.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/ezx-pcap.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/slab.h>

#define PCAP_ADC_MAXQ	8
struct pcap_adc_request {
	u8 bank;
	u8 ch[2];
	u32 flags;
	void (*callback)(void *, u16[]);
	void *data;
};

struct pcap_adc_sync_request {
	u16 res[2];
	struct completion completion;
};

struct pcap_chip {
	struct spi_device *spi;

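	/* register I/O */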
	u32 buf;
	spinlock_t io_lock;

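	/* IRQ handling */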
	unsigned int irq_base;
	u32 msr;
	struct work_struct isr_work;
	struct work_struct msr_work;
	struct workqueue_struct *workqueue;

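	/* ADC request queue */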
	struct pcap_adc_request *adc_queue[PCAP_ADC_MAXQ];
	u8 adc_head;
	u8 adc_tail;
	spinlock_t adc_lock;
};

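/* Register access: every transaction is one full-duplex 32-bit SPI transfer */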
static int ezx_pcap_putget(struct pcap_chip *pcap, u32 *data)
{
	struct spi_transfer t;
	struct spi_message m;
	int status;

	memset(&t, 0, sizeof(t));
	spi_message_init(&m);
	t.len = sizeof(u32);
	spi_message_add_tail(&t, &m);

	pcap->buf = *data;
	t.tx_buf = (u8 *) &pcap->buf;
	t.rx_buf = (u8 *) &pcap->buf;
	status = spi_sync(pcap->spi, &m);

	if (status == 0)
		*data = pcap->buf;

	return status;
}

int ezx_pcap_write(struct pcap_chip *pcap, u8 reg_num, u32 value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pcap->io_lock, flags);
	value &= PCAP_REGISTER_VALUE_MASK;
	value |= PCAP_REGISTER_WRITE_OP_BIT
		| (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
	ret = ezx_pcap_putget(pcap, &value);
	spin_unlock_irqrestore(&pcap->io_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_write);

int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pcap->io_lock, flags);
	*value = PCAP_REGISTER_READ_OP_BIT
		| (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	ret = ezx_pcap_putget(pcap, value);
	spin_unlock_irqrestore(&pcap->io_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_read);

int ezx_pcap_set_bits(struct pcap_chip *pcap, u8 reg_num, u32 mask, u32 val)
{
	unsigned long flags;
	int ret;
	u32 tmp = PCAP_REGISTER_READ_OP_BIT |
		(reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	spin_lock_irqsave(&pcap->io_lock, flags);
	ret = ezx_pcap_putget(pcap, &tmp);
	if (ret)
		goto out_unlock;

	tmp &= (PCAP_REGISTER_VALUE_MASK & ~mask);
	tmp |= (val & mask) | PCAP_REGISTER_WRITE_OP_BIT |
		(reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	ret = ezx_pcap_putget(pcap, &tmp);
out_unlock:
	spin_unlock_irqrestore(&pcap->io_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_set_bits);
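/*
 * Interrupt handling. The mask register lives behind SPI, so mask/unmask
 * only update a shadow value and defer the register write to a workqueue.
 */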
int irq_to_pcap(struct pcap_chip *pcap, int irq)
{
	return irq - pcap->irq_base;
}
EXPORT_SYMBOL_GPL(irq_to_pcap);

int pcap_to_irq(struct pcap_chip *pcap, int irq)
{
	return pcap->irq_base + irq;
}
EXPORT_SYMBOL_GPL(pcap_to_irq);

static void pcap_mask_irq(struct irq_data *d)
{
	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);

	pcap->msr |= 1 << irq_to_pcap(pcap, d->irq);
	queue_work(pcap->workqueue, &pcap->msr_work);
}

static void pcap_unmask_irq(struct irq_data *d)
{
	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);

	pcap->msr &= ~(1 << irq_to_pcap(pcap, d->irq));
	queue_work(pcap->workqueue, &pcap->msr_work);
}

static struct irq_chip pcap_irq_chip = {
	.name		= "pcap",
	.irq_disable	= pcap_mask_irq,
	.irq_mask	= pcap_mask_irq,
	.irq_unmask	= pcap_unmask_irq,
};

static void pcap_msr_work(struct work_struct *work)
{
	struct pcap_chip *pcap = container_of(work, struct pcap_chip, msr_work);

	ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
}

static void pcap_isr_work(struct work_struct *work)
{
	struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work);
	struct pcap_platform_data *pdata = dev_get_platdata(&pcap->spi->dev);
	u32 msr, isr, int_sel, service;
	int irq;

	do {
		ezx_pcap_read(pcap, PCAP_REG_MSR, &msr);
		ezx_pcap_read(pcap, PCAP_REG_ISR, &isr);

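		/* don't service interrupts routed to the other port */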
		if (!(pdata->config & PCAP_SECOND_PORT)) {
			ezx_pcap_read(pcap, PCAP_REG_INT_SEL, &int_sel);
			isr &= ~int_sel;
		}

		ezx_pcap_write(pcap, PCAP_REG_MSR, isr | msr);
		ezx_pcap_write(pcap, PCAP_REG_ISR, isr);

		service = isr & ~msr;
		for (irq = pcap->irq_base; service; service >>= 1, irq++) {
			if (service & 1)
				generic_handle_irq_safe(irq);
		}
		ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
	} while (gpio_get_value(pdata->gpio));
}

static void pcap_irq_handler(struct irq_desc *desc)
{
	struct pcap_chip *pcap = irq_desc_get_handler_data(desc);

	desc->irq_data.chip->irq_ack(&desc->irq_data);
	queue_work(pcap->workqueue, &pcap->isr_work);
}
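/* ADC: conversions are queued and completed from the ADCDONE interrupt */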
void pcap_set_ts_bits(struct pcap_chip *pcap, u32 bits)
{
	unsigned long flags;
	u32 tmp;

	spin_lock_irqsave(&pcap->adc_lock, flags);
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	tmp |= bits & (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	spin_unlock_irqrestore(&pcap->adc_lock, flags);
}
EXPORT_SYMBOL_GPL(pcap_set_ts_bits);

static void pcap_disable_adc(struct pcap_chip *pcap)
{
	u32 tmp;

	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_ADEN|PCAP_ADC_BATT_I_ADC|PCAP_ADC_BATT_I_POLARITY);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
}

static void pcap_adc_trigger(struct pcap_chip *pcap)
{
	unsigned long flags;
	u32 tmp;
	u8 head;

	spin_lock_irqsave(&pcap->adc_lock, flags);
	head = pcap->adc_head;
	if (!pcap->adc_queue[head]) {
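		/* queue is empty, power down the ADC */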
		pcap_disable_adc(pcap);
		spin_unlock_irqrestore(&pcap->adc_lock, flags);
		return;
	}

	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	tmp |= pcap->adc_queue[head]->flags | PCAP_ADC_ADEN;

	if (pcap->adc_queue[head]->bank == PCAP_ADC_BANK_1)
		tmp |= PCAP_ADC_AD_SEL1;

	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	spin_unlock_irqrestore(&pcap->adc_lock, flags);
	ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC);
}

static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
{
	struct pcap_chip *pcap = _pcap;
	struct pcap_adc_request *req;
	u16 res[2];
	u32 tmp;

	spin_lock(&pcap->adc_lock);
	req = pcap->adc_queue[pcap->adc_head];

	if (WARN(!req, "adc irq without pending request\n")) {
		spin_unlock(&pcap->adc_lock);
		return IRQ_HANDLED;
	}

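	/* select the requested channels and read back both conversion results */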
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_ADA1_MASK | PCAP_ADC_ADA2_MASK);
	tmp |= (req->ch[0] << PCAP_ADC_ADA1_SHIFT);
	tmp |= (req->ch[1] << PCAP_ADC_ADA2_SHIFT);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	ezx_pcap_read(pcap, PCAP_REG_ADR, &tmp);
	res[0] = (tmp & PCAP_ADR_ADD1_MASK) >> PCAP_ADR_ADD1_SHIFT;
	res[1] = (tmp & PCAP_ADR_ADD2_MASK) >> PCAP_ADR_ADD2_SHIFT;

	pcap->adc_queue[pcap->adc_head] = NULL;
	pcap->adc_head = (pcap->adc_head + 1) & (PCAP_ADC_MAXQ - 1);
	spin_unlock(&pcap->adc_lock);

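	/* hand the results to the requester, outside of adc_lock */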
	req->callback(req->data, res);
	kfree(req);

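	/* trigger the next queued conversion, if any */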
	pcap_adc_trigger(pcap);

	return IRQ_HANDLED;
}

int pcap_adc_async(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
		   void *callback, void *data)
{
	struct pcap_adc_request *req;
	unsigned long irq_flags;

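	/* freed by pcap_adc_irq() once the conversion has completed */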
	req = kmalloc(sizeof(struct pcap_adc_request), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->bank = bank;
	req->flags = flags;
	req->ch[0] = ch[0];
	req->ch[1] = ch[1];
	req->callback = callback;
	req->data = data;

	spin_lock_irqsave(&pcap->adc_lock, irq_flags);
	if (pcap->adc_queue[pcap->adc_tail]) {
		spin_unlock_irqrestore(&pcap->adc_lock, irq_flags);
		kfree(req);
		return -EBUSY;
	}
	pcap->adc_queue[pcap->adc_tail] = req;
	pcap->adc_tail = (pcap->adc_tail + 1) & (PCAP_ADC_MAXQ - 1);
	spin_unlock_irqrestore(&pcap->adc_lock, irq_flags);

	pcap_adc_trigger(pcap);

	return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_async);

static void pcap_adc_sync_cb(void *param, u16 res[])
{
	struct pcap_adc_sync_request *req = param;

	req->res[0] = res[0];
	req->res[1] = res[1];
	complete(&req->completion);
}

int pcap_adc_sync(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
		  u16 res[])
{
	struct pcap_adc_sync_request sync_data;
	int ret;

	init_completion(&sync_data.completion);
	ret = pcap_adc_async(pcap, bank, flags, ch, pcap_adc_sync_cb,
			     &sync_data);
	if (ret)
		return ret;
	wait_for_completion(&sync_data.completion);
	res[0] = sync_data.res[0];
	res[1] = sync_data.res[1];

	return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_sync);
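/* subdevice registration */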
static int pcap_remove_subdev(struct device *dev, void *unused)
{
	platform_device_unregister(to_platform_device(dev));
	return 0;
}

static int pcap_add_subdev(struct pcap_chip *pcap,
			   struct pcap_subdev *subdev)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(subdev->name, subdev->id);
	if (!pdev)
		return -ENOMEM;

	pdev->dev.parent = &pcap->spi->dev;
	pdev->dev.platform_data = subdev->platform_data;

	ret = platform_device_add(pdev);
	if (ret)
		platform_device_put(pdev);

	return ret;
}

static void ezx_pcap_remove(struct spi_device *spi)
{
	struct pcap_chip *pcap = spi_get_drvdata(spi);
	unsigned long flags;
	int i;

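	/* unregister all subdevices */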
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);

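	/* free any ADC requests still in the queue */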
	spin_lock_irqsave(&pcap->adc_lock, flags);
	for (i = 0; i < PCAP_ADC_MAXQ; i++)
		kfree(pcap->adc_queue[i]);
	spin_unlock_irqrestore(&pcap->adc_lock, flags);

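	/* tear down the irq mappings */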
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		irq_set_chip_and_handler(i, NULL, NULL);

	destroy_workqueue(pcap->workqueue);
}

static int ezx_pcap_probe(struct spi_device *spi)
{
	struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev);
	struct pcap_chip *pcap;
	int i, adc_irq;
	int ret = -ENODEV;

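	/* platform data is mandatory */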
	if (!pdata)
		goto ret;

	pcap = devm_kzalloc(&spi->dev, sizeof(*pcap), GFP_KERNEL);
	if (!pcap) {
		ret = -ENOMEM;
		goto ret;
	}

	spin_lock_init(&pcap->io_lock);
	spin_lock_init(&pcap->adc_lock);
	INIT_WORK(&pcap->isr_work, pcap_isr_work);
	INIT_WORK(&pcap->msr_work, pcap_msr_work);
	spi_set_drvdata(spi, pcap);

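	/* set up the SPI link: 32-bit words, chip select polarity from platform data */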
	spi->bits_per_word = 32;
	spi->mode = SPI_MODE_0 | (pdata->config & PCAP_CS_AH ? SPI_CS_HIGH : 0);
	ret = spi_setup(spi);
	if (ret)
		goto ret;

	pcap->spi = spi;

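	/* IRQ setup: a dedicated workqueue services the chip's interrupts */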
	pcap->irq_base = pdata->irq_base;
	pcap->workqueue = create_singlethread_workqueue("pcapd");
	if (!pcap->workqueue) {
		ret = -ENOMEM;
		dev_err(&spi->dev, "can't create pcap thread\n");
		goto ret;
	}

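	/* primary port: leave only ADCDONE2 selected in INT_SEL */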
	if (!(pdata->config & PCAP_SECOND_PORT))
		ezx_pcap_write(pcap, PCAP_REG_INT_SEL,
					(1 << PCAP_IRQ_ADCDONE2));

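	/* register the PCAP interrupts with the generic irq layer */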
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
		irq_set_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
		irq_set_chip_data(i, pcap);
		irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
	}

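	/* mask and clear all PCAP interrupts, then hook up the chained handler */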
	ezx_pcap_write(pcap, PCAP_REG_MSR, PCAP_MASK_ALL_INTERRUPT);
	ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER);
	pcap->msr = PCAP_MASK_ALL_INTERRUPT;

	irq_set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
	irq_set_chained_handler_and_data(spi->irq, pcap_irq_handler, pcap);
	irq_set_irq_wake(spi->irq, 1);

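	/* request the ADC "conversion done" interrupt */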
	adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
					PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);

	ret = devm_request_irq(&spi->dev, adc_irq, pcap_adc_irq, 0, "ADC",
				pcap);
	if (ret)
		goto free_irqchip;

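	/* register the subdevices described by the platform data */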
	for (i = 0; i < pdata->num_subdevs; i++) {
		ret = pcap_add_subdev(pcap, &pdata->subdevs[i]);
		if (ret)
			goto remove_subdevs;
	}

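	/* board-specific initialization hook */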
	if (pdata->init)
		pdata->init(pcap);

	return 0;

remove_subdevs:
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
free_irqchip:
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		irq_set_chip_and_handler(i, NULL, NULL);

	destroy_workqueue(pcap->workqueue);
ret:
	return ret;
}

static struct spi_driver ezxpcap_driver = {
	.probe	= ezx_pcap_probe,
	.remove	= ezx_pcap_remove,
	.driver = {
		.name	= "ezx-pcap",
	},
};

static int __init ezx_pcap_init(void)
{
	return spi_register_driver(&ezxpcap_driver);
}

static void __exit ezx_pcap_exit(void)
{
	spi_unregister_driver(&ezxpcap_driver);
}

subsys_initcall(ezx_pcap_init);
module_exit(ezx_pcap_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver");
MODULE_ALIAS("spi:ezx-pcap");