0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include <linux/init.h>
0013 #include <linux/module.h>
0014 #include <linux/moduleparam.h>
0015 #include <linux/kmod.h>
0016 #include <linux/kernel.h>
0017 #include <linux/slab.h>
0018 #include <linux/interrupt.h>
0019 #include <linux/delay.h>
0020 #include <linux/list.h>
0021 #include <media/videobuf2-v4l2.h>
0022 #include <media/videobuf2-vmalloc.h>
0023
0024 #include "netup_unidvb.h"
0025 #include "cxd2841er.h"
0026 #include "horus3a.h"
0027 #include "ascot2e.h"
0028 #include "helene.h"
0029 #include "lnbh25.h"
0030
/* Module option: enable access to the on-board SPI flash interface
 * (forced on when an old-firmware card is detected at probe time). */
static int spi_enable;
module_param(spi_enable, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);

MODULE_DESCRIPTION("Driver for NetUP Dual Universal DVB CI PCIe card");
MODULE_AUTHOR("info@netup.ru");
MODULE_VERSION(NETUP_UNIDVB_VERSION);
MODULE_LICENSE("GPL");

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);


/* AVL PCIe bridge interrupt registers (BAR0 offsets) and their bits */
#define AVL_PCIE_IENR 0x50
#define AVL_PCIE_ISR 0x40
#define AVL_IRQ_ENABLE 0x80
#define AVL_IRQ_ASSERTED 0x80

/* GPIO block (BAR0 offsets): direct value, toggle, set and clear ports */
#define GPIO_REG_IO 0x4880
#define GPIO_REG_IO_TOGGLE 0x4882
#define GPIO_REG_IO_SET 0x4884
#define GPIO_REG_IO_CLEAR 0x4886

/* GPIO bits: frontend/tuner reset lines and per-channel RF control */
#define GPIO_FEA_RESET (1 << 0)
#define GPIO_FEB_RESET (1 << 1)
#define GPIO_RFA_CTL (1 << 2)
#define GPIO_RFB_CTL (1 << 3)
#define GPIO_FEA_TU_RESET (1 << 4)
#define GPIO_FEB_TU_RESET (1 << 5)

/* Per-channel DMA engine register banks (BAR0 offsets) */
#define NETUP_DMA0_ADDR 0x4900
#define NETUP_DMA1_ADDR 0x4940

/* Ring geometry: blocks per ring, 188-byte TS packets per block */
#define NETUP_DMA_BLOCKS_COUNT 8
#define NETUP_DMA_PACKETS_COUNT 128

/* Bits written to the DMA ctrlstat_set / ctrlstat_clear registers */
#define BIT_DMA_RUN 1
#define BIT_DMA_ERROR 2
#define BIT_DMA_IRQ 0x200
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
/*
 * Register bank of one hardware DMA engine, memory-mapped in BAR0 at
 * NETUP_DMA0_ADDR / NETUP_DMA1_ADDR (all fields little-endian).
 */
struct netup_dma_regs {
	__le32 ctrlstat_set;	/* write ones to set control/status bits */
	__le32 ctrlstat_clear;	/* write ones to clear control/status bits */
	__le32 start_addr_lo;	/* ring buffer base address, low 32 bits */
	__le32 start_addr_hi;	/* ring buffer base address, high 32 bits */
	__le32 size;		/* ring geometry: blocks/packets/packet size */
	__le32 timeout;		/* engine timeout (375000000 written at init;
				 * units not documented here) */
	__le32 curr_addr_lo;	/* hardware write pointer, low 32 bits */
	__le32 curr_addr_hi;	/* hardware write pointer, high 32 bits */
	__le32 stat_pkt_received;
	__le32 stat_pkt_accepted;
	__le32 stat_pkt_overruns;
	__le32 stat_pkt_underruns;
	__le32 stat_fifo_overruns;
} __packed __aligned(1);

/* One vb2 capture buffer plus its place on the per-channel free list */
struct netup_unidvb_buffer {
	struct vb2_v4l2_buffer vb;	/* vb2 buffer bookkeeping */
	struct list_head list;		/* entry in netup_dma.free_buffers */
	u32 size;			/* bytes copied into the buffer so far */
};
0110
/* Forward declarations: tuner RF-switch callback and queue teardown */
static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc);
static void netup_unidvb_queue_cleanup(struct netup_dma *dma);

/* Sony CXD2841ER demodulator; .xtal is fixed up per hardware revision
 * in netup_unidvb_dvb_init() before each attach. */
static struct cxd2841er_config demod_config = {
	.i2c_addr = 0xc8,
	.xtal = SONY_XTAL_24000,
	.flags = CXD2841ER_USE_GATECTRL | CXD2841ER_ASCOT
};

/* Sony HORUS3A DVB-S/S2 tuner (used on hardware rev 1.3) */
static struct horus3a_config horus3a_conf = {
	.i2c_address = 0xc0,
	.xtal_freq_mhz = 16,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

/* Sony ASCOT2E terrestrial/cable tuner (used on hardware rev 1.3) */
static struct ascot2e_config ascot2e_conf = {
	.i2c_address = 0xc2,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

/* Sony HELENE tuner (used on hardware rev 1.4 for both systems) */
static struct helene_config helene_conf = {
	.i2c_address = 0xc0,
	.xtal = SONY_HELENE_XTAL_24000,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

/* LNBH25 LNB supply/control chip */
static struct lnbh25_config lnbh25_conf = {
	.i2c_address = 0x10,
	.data2_config = LNBH25_TEN | LNBH25_EXTM
};
0141
0142 static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc)
0143 {
0144 u8 reg, mask;
0145 struct netup_dma *dma = priv;
0146 struct netup_unidvb_dev *ndev;
0147
0148 if (!priv)
0149 return -EINVAL;
0150 ndev = dma->ndev;
0151 dev_dbg(&ndev->pci_dev->dev, "%s(): num %d is_dvb_tc %d\n",
0152 __func__, dma->num, is_dvb_tc);
0153 reg = readb(ndev->bmmio0 + GPIO_REG_IO);
0154 mask = (dma->num == 0) ? GPIO_RFA_CTL : GPIO_RFB_CTL;
0155
0156
0157 if (ndev->rev == NETUP_HW_REV_1_4)
0158 is_dvb_tc = !is_dvb_tc;
0159
0160 if (!is_dvb_tc)
0161 reg |= mask;
0162 else
0163 reg &= ~mask;
0164 writeb(reg, ndev->bmmio0 + GPIO_REG_IO);
0165 return 0;
0166 }
0167
/*
 * netup_unidvb_dev_enable - initial board bring-up at probe time.
 *
 * Enables PCIe bridge interrupt delivery, drives all GPIO lines low for
 * 100 ms, then raises the frontend/tuner reset and RF control bits
 * (the sequencing suggests active-low resets being released here —
 * NOTE(review): confirm polarity against board documentation).
 */
static void netup_unidvb_dev_enable(struct netup_unidvb_dev *ndev)
{
	u16 gpio_reg;

	/* enable PCI interrupts at the AVL bridge */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	/* drive all GPIO lines low, hold for 100 ms */
	writeb(0x00, ndev->bmmio0 + GPIO_REG_IO);
	msleep(100);
	gpio_reg =
		GPIO_FEA_RESET | GPIO_FEB_RESET |
		GPIO_FEA_TU_RESET | GPIO_FEB_TU_RESET |
		GPIO_RFA_CTL | GPIO_RFB_CTL;
	writeb(gpio_reg, ndev->bmmio0 + GPIO_REG_IO);
	dev_dbg(&ndev->pci_dev->dev,
		"%s(): AVL_PCIE_IENR 0x%x GPIO_REG_IO 0x%x\n",
		__func__, readl(ndev->bmmio0 + AVL_PCIE_IENR),
		(int)readb(ndev->bmmio0 + GPIO_REG_IO));

}
0188
0189 static void netup_unidvb_dma_enable(struct netup_dma *dma, int enable)
0190 {
0191 u32 irq_mask = (dma->num == 0 ?
0192 NETUP_UNIDVB_IRQ_DMA1 : NETUP_UNIDVB_IRQ_DMA2);
0193
0194 dev_dbg(&dma->ndev->pci_dev->dev,
0195 "%s(): DMA%d enable %d\n", __func__, dma->num, enable);
0196 if (enable) {
0197 writel(BIT_DMA_RUN, &dma->regs->ctrlstat_set);
0198 writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_SET);
0199 } else {
0200 writel(BIT_DMA_RUN, &dma->regs->ctrlstat_clear);
0201 writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_CLEAR);
0202 }
0203 }
0204
/*
 * netup_dma_interrupt - per-channel DMA interrupt handler.
 *
 * Reads the engine's current write pointer, converts it into the number
 * of freshly written ring bytes, accounts those in dma->data_size and
 * dma->data_offset, and defers the actual copy-out to the workqueue.
 * Always returns IRQ_HANDLED (the top-level ISR already validated the
 * interrupt source).
 */
static irqreturn_t netup_dma_interrupt(struct netup_dma *dma)
{
	u64 addr_curr;
	u32 size;
	unsigned long flags;
	struct device *dev = &dma->ndev->pci_dev->dev;

	spin_lock_irqsave(&dma->lock, flags);
	/* rebuild the full write pointer: the engine was programmed with
	 * only the low 30 address bits, so OR back dma->high_addr */
	addr_curr = ((u64)readl(&dma->regs->curr_addr_hi) << 32) |
		(u64)readl(&dma->regs->curr_addr_lo) | dma->high_addr;
	/* acknowledge the interrupt in the engine */
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	/* sanity check: pointer must lie inside this channel's ring */
	if (addr_curr < dma->addr_phys ||
		addr_curr > dma->addr_phys + dma->ring_buffer_size) {
		if (addr_curr != 0) {
			dev_err(dev,
				"%s(): addr 0x%llx not from 0x%llx:0x%llx\n",
				__func__, addr_curr, (u64)dma->addr_phys,
				(u64)(dma->addr_phys + dma->ring_buffer_size));
		}
		goto irq_handled;
	}
	/* bytes produced since the last interrupt, handling ring wrap */
	size = (addr_curr >= dma->addr_last) ?
		(u32)(addr_curr - dma->addr_last) :
		(u32)(dma->ring_buffer_size - (dma->addr_last - addr_curr));
	/* leftover data means the worker fell behind (lost interrupt):
	 * accumulate instead of overwriting */
	if (dma->data_size != 0) {
		printk_ratelimited("%s(): lost interrupt, data size %d\n",
			__func__, dma->data_size);
		dma->data_size += size;
	}
	/* first interrupt, or accumulated size overflowed the ring:
	 * restart accounting from the last known position */
	if (dma->data_size == 0 || dma->data_size > dma->ring_buffer_size) {
		dma->data_size = size;
		dma->data_offset = (u32)(dma->addr_last - dma->addr_phys);
	}
	dma->addr_last = addr_curr;
	queue_work(dma->ndev->wq, &dma->work);
irq_handled:
	spin_unlock_irqrestore(&dma->lock, flags);
	return IRQ_HANDLED;
}
0246
/*
 * netup_unidvb_isr - top-level shared PCI interrupt handler.
 *
 * Masks bridge interrupts for the duration of the handler, checks that
 * this device actually asserted the line, then dispatches to the SPI,
 * I2C, DMA or CI sub-handlers based on the board's interrupt status
 * register. On old firmware only SPI interrupts are serviced.
 */
static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
{
	struct pci_dev *pci_dev = (struct pci_dev *)dev_id;
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);
	u32 reg40, reg_isr;
	irqreturn_t iret = IRQ_NONE;

	/* disable bridge interrupts while handling */
	writel(0, ndev->bmmio0 + AVL_PCIE_IENR);
	/* did this device raise the (shared) line? */
	reg40 = readl(ndev->bmmio0 + AVL_PCIE_ISR);
	if ((reg40 & AVL_IRQ_ASSERTED) != 0) {
		/* IRQ is being signaled: read the board status register */
		reg_isr = readw(ndev->bmmio0 + REG_ISR);
		if (reg_isr & NETUP_UNIDVB_IRQ_SPI)
			iret = netup_spi_interrupt(ndev->spi);
		else if (!ndev->old_fw) {
			if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
				iret = netup_i2c_interrupt(&ndev->i2c[0]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
				iret = netup_i2c_interrupt(&ndev->i2c[1]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
				iret = netup_dma_interrupt(&ndev->dma[0]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
				iret = netup_dma_interrupt(&ndev->dma[1]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
				iret = netup_ci_interrupt(ndev);
			} else {
				/* no recognized bit set: report it via the
				 * label shared with the old-firmware path */
				goto err;
			}
		} else {
err:
			dev_err(&pci_dev->dev,
				"%s(): unknown interrupt 0x%x\n",
				__func__, reg_isr);
		}
	}
	/* re-enable bridge interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	return iret;
}
0288
0289 static int netup_unidvb_queue_setup(struct vb2_queue *vq,
0290 unsigned int *nbuffers,
0291 unsigned int *nplanes,
0292 unsigned int sizes[],
0293 struct device *alloc_devs[])
0294 {
0295 struct netup_dma *dma = vb2_get_drv_priv(vq);
0296
0297 dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
0298
0299 *nplanes = 1;
0300 if (vq->num_buffers + *nbuffers < VIDEO_MAX_FRAME)
0301 *nbuffers = VIDEO_MAX_FRAME - vq->num_buffers;
0302 sizes[0] = PAGE_ALIGN(NETUP_DMA_PACKETS_COUNT * 188);
0303 dev_dbg(&dma->ndev->pci_dev->dev, "%s() nbuffers=%d sizes[0]=%d\n",
0304 __func__, *nbuffers, sizes[0]);
0305 return 0;
0306 }
0307
0308 static int netup_unidvb_buf_prepare(struct vb2_buffer *vb)
0309 {
0310 struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
0311 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
0312 struct netup_unidvb_buffer *buf = container_of(vbuf,
0313 struct netup_unidvb_buffer, vb);
0314
0315 dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf);
0316 buf->size = 0;
0317 return 0;
0318 }
0319
0320 static void netup_unidvb_buf_queue(struct vb2_buffer *vb)
0321 {
0322 unsigned long flags;
0323 struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
0324 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
0325 struct netup_unidvb_buffer *buf = container_of(vbuf,
0326 struct netup_unidvb_buffer, vb);
0327
0328 dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf);
0329 spin_lock_irqsave(&dma->lock, flags);
0330 list_add_tail(&buf->list, &dma->free_buffers);
0331 spin_unlock_irqrestore(&dma->lock, flags);
0332 mod_timer(&dma->timeout, jiffies + msecs_to_jiffies(1000));
0333 }
0334
0335 static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
0336 {
0337 struct netup_dma *dma = vb2_get_drv_priv(q);
0338
0339 dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
0340 netup_unidvb_dma_enable(dma, 1);
0341 return 0;
0342 }
0343
0344 static void netup_unidvb_stop_streaming(struct vb2_queue *q)
0345 {
0346 struct netup_dma *dma = vb2_get_drv_priv(q);
0347
0348 dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
0349 netup_unidvb_dma_enable(dma, 0);
0350 netup_unidvb_queue_cleanup(dma);
0351 }
0352
/* videobuf2 callbacks for the per-channel TS capture queues */
static const struct vb2_ops dvb_qops = {
	.queue_setup = netup_unidvb_queue_setup,
	.buf_prepare = netup_unidvb_buf_prepare,
	.buf_queue = netup_unidvb_buf_queue,
	.start_streaming = netup_unidvb_start_streaming,
	.stop_streaming = netup_unidvb_stop_streaming,
};
0360
0361 static int netup_unidvb_queue_init(struct netup_dma *dma,
0362 struct vb2_queue *vb_queue)
0363 {
0364 int res;
0365
0366
0367 vb_queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
0368 vb_queue->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
0369 vb_queue->drv_priv = dma;
0370 vb_queue->buf_struct_size = sizeof(struct netup_unidvb_buffer);
0371 vb_queue->ops = &dvb_qops;
0372 vb_queue->mem_ops = &vb2_vmalloc_memops;
0373 vb_queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
0374 res = vb2_queue_init(vb_queue);
0375 if (res != 0) {
0376 dev_err(&dma->ndev->pci_dev->dev,
0377 "%s(): vb2_queue_init failed (%d)\n", __func__, res);
0378 }
0379 return res;
0380 }
0381
0382 static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev,
0383 int num)
0384 {
0385 int fe_count = 2;
0386 int i = 0;
0387 struct vb2_dvb_frontend *fes[2];
0388 u8 fe_name[32];
0389
0390 if (ndev->rev == NETUP_HW_REV_1_3)
0391 demod_config.xtal = SONY_XTAL_20500;
0392 else
0393 demod_config.xtal = SONY_XTAL_24000;
0394
0395 if (num < 0 || num > 1) {
0396 dev_dbg(&ndev->pci_dev->dev,
0397 "%s(): unable to init DVB bus %d\n", __func__, num);
0398 return -ENODEV;
0399 }
0400 mutex_init(&ndev->frontends[num].lock);
0401 INIT_LIST_HEAD(&ndev->frontends[num].felist);
0402
0403 for (i = 0; i < fe_count; i++) {
0404 if (vb2_dvb_alloc_frontend(&ndev->frontends[num], i+1)
0405 == NULL) {
0406 dev_err(&ndev->pci_dev->dev,
0407 "%s(): unable to allocate vb2_dvb_frontend\n",
0408 __func__);
0409 return -ENOMEM;
0410 }
0411 }
0412
0413 for (i = 0; i < fe_count; i++) {
0414 fes[i] = vb2_dvb_get_frontend(&ndev->frontends[num], i+1);
0415 if (fes[i] == NULL) {
0416 dev_err(&ndev->pci_dev->dev,
0417 "%s(): frontends has not been allocated\n",
0418 __func__);
0419 return -EINVAL;
0420 }
0421 }
0422
0423 for (i = 0; i < fe_count; i++) {
0424 netup_unidvb_queue_init(&ndev->dma[num], &fes[i]->dvb.dvbq);
0425 snprintf(fe_name, sizeof(fe_name), "netup_fe%d", i);
0426 fes[i]->dvb.name = fe_name;
0427 }
0428
0429 fes[0]->dvb.frontend = dvb_attach(cxd2841er_attach_s,
0430 &demod_config, &ndev->i2c[num].adap);
0431 if (fes[0]->dvb.frontend == NULL) {
0432 dev_dbg(&ndev->pci_dev->dev,
0433 "%s(): unable to attach DVB-S/S2 frontend\n",
0434 __func__);
0435 goto frontend_detach;
0436 }
0437
0438 if (ndev->rev == NETUP_HW_REV_1_3) {
0439 horus3a_conf.set_tuner_priv = &ndev->dma[num];
0440 if (!dvb_attach(horus3a_attach, fes[0]->dvb.frontend,
0441 &horus3a_conf, &ndev->i2c[num].adap)) {
0442 dev_dbg(&ndev->pci_dev->dev,
0443 "%s(): unable to attach HORUS3A DVB-S/S2 tuner frontend\n",
0444 __func__);
0445 goto frontend_detach;
0446 }
0447 } else {
0448 helene_conf.set_tuner_priv = &ndev->dma[num];
0449 if (!dvb_attach(helene_attach_s, fes[0]->dvb.frontend,
0450 &helene_conf, &ndev->i2c[num].adap)) {
0451 dev_err(&ndev->pci_dev->dev,
0452 "%s(): unable to attach HELENE DVB-S/S2 tuner frontend\n",
0453 __func__);
0454 goto frontend_detach;
0455 }
0456 }
0457
0458 if (!dvb_attach(lnbh25_attach, fes[0]->dvb.frontend,
0459 &lnbh25_conf, &ndev->i2c[num].adap)) {
0460 dev_dbg(&ndev->pci_dev->dev,
0461 "%s(): unable to attach SEC frontend\n", __func__);
0462 goto frontend_detach;
0463 }
0464
0465
0466 fes[1]->dvb.frontend = dvb_attach(cxd2841er_attach_t_c,
0467 &demod_config, &ndev->i2c[num].adap);
0468 if (fes[1]->dvb.frontend == NULL) {
0469 dev_dbg(&ndev->pci_dev->dev,
0470 "%s(): unable to attach Ter frontend\n", __func__);
0471 goto frontend_detach;
0472 }
0473 fes[1]->dvb.frontend->id = 1;
0474 if (ndev->rev == NETUP_HW_REV_1_3) {
0475 ascot2e_conf.set_tuner_priv = &ndev->dma[num];
0476 if (!dvb_attach(ascot2e_attach, fes[1]->dvb.frontend,
0477 &ascot2e_conf, &ndev->i2c[num].adap)) {
0478 dev_dbg(&ndev->pci_dev->dev,
0479 "%s(): unable to attach Ter tuner frontend\n",
0480 __func__);
0481 goto frontend_detach;
0482 }
0483 } else {
0484 helene_conf.set_tuner_priv = &ndev->dma[num];
0485 if (!dvb_attach(helene_attach, fes[1]->dvb.frontend,
0486 &helene_conf, &ndev->i2c[num].adap)) {
0487 dev_err(&ndev->pci_dev->dev,
0488 "%s(): unable to attach HELENE Ter tuner frontend\n",
0489 __func__);
0490 goto frontend_detach;
0491 }
0492 }
0493
0494 if (vb2_dvb_register_bus(&ndev->frontends[num],
0495 THIS_MODULE, NULL,
0496 &ndev->pci_dev->dev, NULL, adapter_nr, 1)) {
0497 dev_dbg(&ndev->pci_dev->dev,
0498 "%s(): unable to register DVB bus %d\n",
0499 __func__, num);
0500 goto frontend_detach;
0501 }
0502 dev_info(&ndev->pci_dev->dev, "DVB init done, num=%d\n", num);
0503 return 0;
0504 frontend_detach:
0505 vb2_dvb_dealloc_frontends(&ndev->frontends[num]);
0506 return -EINVAL;
0507 }
0508
0509 static void netup_unidvb_dvb_fini(struct netup_unidvb_dev *ndev, int num)
0510 {
0511 if (num < 0 || num > 1) {
0512 dev_err(&ndev->pci_dev->dev,
0513 "%s(): unable to unregister DVB bus %d\n",
0514 __func__, num);
0515 return;
0516 }
0517 vb2_dvb_unregister_bus(&ndev->frontends[num]);
0518 dev_info(&ndev->pci_dev->dev,
0519 "%s(): DVB bus %d unregistered\n", __func__, num);
0520 }
0521
/*
 * netup_unidvb_dvb_setup - bring up the DVB adapters for both channels;
 * if the second fails, the first is torn down again.
 *
 * Returns 0 on success or the first failing channel's error code.
 */
static int netup_unidvb_dvb_setup(struct netup_unidvb_dev *ndev)
{
	int res = netup_unidvb_dvb_init(ndev, 0);

	if (res)
		return res;
	res = netup_unidvb_dvb_init(ndev, 1);
	if (res)
		netup_unidvb_dvb_fini(ndev, 0);
	return res;
}
0536
/*
 * netup_unidvb_ring_copy - move pending TS data from the DMA ring into
 * a vb2 buffer, filling it up to NETUP_DMA_PACKETS_COUNT * 188 bytes.
 *
 * Handles at most one ring wrap: first the tail segment up to the end
 * of the ring (when the pending span wraps), then the contiguous
 * remainder from the current offset. Updates buf->size as well as
 * dma->data_size / dma->data_offset. Called with dma->lock held.
 *
 * Returns 0, or -EINVAL when the vb2 plane has no virtual address.
 */
static int netup_unidvb_ring_copy(struct netup_dma *dma,
				  struct netup_unidvb_buffer *buf)
{
	u32 copy_bytes, ring_bytes;
	u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size;
	u8 *p = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
	struct netup_unidvb_dev *ndev = dma->ndev;

	if (p == NULL) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	p += buf->size;
	/* pending data wraps past the end of the ring: copy the tail part */
	if (dma->data_offset + dma->data_size > dma->ring_buffer_size) {
		ring_bytes = dma->ring_buffer_size - dma->data_offset;
		copy_bytes = (ring_bytes > buff_bytes) ?
			buff_bytes : ring_bytes;
		memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt + dma->data_offset), copy_bytes);
		p += copy_bytes;
		buf->size += copy_bytes;
		buff_bytes -= copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	/* copy the contiguous part if the buffer still has room */
	if (buff_bytes > 0) {
		ring_bytes = dma->data_size;
		copy_bytes = (ring_bytes > buff_bytes) ?
			buff_bytes : ring_bytes;
		memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt + dma->data_offset), copy_bytes);
		buf->size += copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	return 0;
}
0577
/*
 * netup_unidvb_dma_worker - workqueue handler that drains the DMA ring.
 *
 * Copies pending ring data into buffers from the channel's free list
 * and completes each buffer once it holds a full
 * NETUP_DMA_PACKETS_COUNT * 188 bytes. Whatever data remains when the
 * loop exits (no buffers, copy error) is discarded: data_size is reset
 * to 0 on every path out.
 */
static void netup_unidvb_dma_worker(struct work_struct *work)
{
	struct netup_dma *dma = container_of(work, struct netup_dma, work);
	struct netup_unidvb_dev *ndev = dma->ndev;
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	if (dma->data_size == 0) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): data_size == 0\n", __func__);
		goto work_done;
	}
	while (dma->data_size > 0) {
		if (list_empty(&dma->free_buffers)) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): no free buffers\n", __func__);
			goto work_done;
		}
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		/* should not happen: full buffers are removed below */
		if (buf->size >= NETUP_DMA_PACKETS_COUNT * 188) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer overflow, size %d\n",
				__func__, buf->size);
			goto work_done;
		}
		if (netup_unidvb_ring_copy(dma, buf))
			goto work_done;
		if (buf->size == NETUP_DMA_PACKETS_COUNT * 188) {
			/* buffer completely filled: hand it back to vb2 */
			list_del(&buf->list);
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer %p done, size %d\n",
				__func__, buf, buf->size);
			buf->vb.vb2_buf.timestamp = ktime_get_ns();
			vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		}
	}
work_done:
	dma->data_size = 0;
	spin_unlock_irqrestore(&dma->lock, flags);
}
0621
0622 static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
0623 {
0624 struct netup_unidvb_buffer *buf;
0625 unsigned long flags;
0626
0627 spin_lock_irqsave(&dma->lock, flags);
0628 while (!list_empty(&dma->free_buffers)) {
0629 buf = list_first_entry(&dma->free_buffers,
0630 struct netup_unidvb_buffer, list);
0631 list_del(&buf->list);
0632 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
0633 }
0634 spin_unlock_irqrestore(&dma->lock, flags);
0635 }
0636
0637 static void netup_unidvb_dma_timeout(struct timer_list *t)
0638 {
0639 struct netup_dma *dma = from_timer(dma, t, timeout);
0640 struct netup_unidvb_dev *ndev = dma->ndev;
0641
0642 dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
0643 netup_unidvb_queue_cleanup(dma);
0644 }
0645
/*
 * netup_unidvb_dma_init - initialize DMA channel @num (0 or 1).
 *
 * Splits the coherent DMA area allocated at probe time into two rings
 * (one per channel), zeroes this channel's half, sets up the software
 * state (lock, worker, free list, watchdog timer) and programs the
 * engine's geometry, base address and timeout registers.
 *
 * Returns 0 or -ENODEV for an out-of-range channel number.
 */
static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;
	struct device *dev = &ndev->pci_dev->dev;

	if (num < 0 || num > 1) {
		dev_err(dev, "%s(): unable to register DMA%d\n",
			__func__, num);
		return -ENODEV;
	}
	dma = &ndev->dma[num];
	dev_info(dev, "%s(): starting DMA%d\n", __func__, num);
	dma->num = num;
	dma->ndev = ndev;
	spin_lock_init(&dma->lock);
	INIT_WORK(&dma->work, netup_unidvb_dma_worker);
	INIT_LIST_HEAD(&dma->free_buffers);
	timer_setup(&dma->timeout, netup_unidvb_dma_timeout, 0);
	/* each channel gets half of the shared coherent area */
	dma->ring_buffer_size = ndev->dma_size / 2;
	dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
	dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
		dma->ring_buffer_size * num);
	dev_info(dev, "%s(): DMA%d buffer virt/phys 0x%p/0x%llx size %d\n",
		__func__, num, dma->addr_virt,
		(unsigned long long)dma->addr_phys,
		dma->ring_buffer_size);
	memset_io((u8 __iomem *)dma->addr_virt, 0, dma->ring_buffer_size);
	dma->addr_last = dma->addr_phys;
	/* the engine is programmed with only the low 30 address bits;
	 * keep the top bits (mask 0xC0000000) to rebuild full pointers
	 * in the interrupt handler */
	dma->high_addr = (u32)(dma->addr_phys & 0xC0000000);
	dma->regs = (struct netup_dma_regs __iomem *)(num == 0 ?
		ndev->bmmio0 + NETUP_DMA0_ADDR :
		ndev->bmmio0 + NETUP_DMA1_ADDR);
	/* ring geometry: block count, packets per block, packet size */
	writel((NETUP_DMA_BLOCKS_COUNT << 24) |
		(NETUP_DMA_PACKETS_COUNT << 8) | 188, &dma->regs->size);
	writel((u32)(dma->addr_phys & 0x3FFFFFFF), &dma->regs->start_addr_lo);
	writel(0, &dma->regs->start_addr_hi);
	/* high address bits go to a separate board register */
	writel(dma->high_addr, ndev->bmmio0 + 0x1000);
	/* engine timeout value; units not documented here */
	writel(375000000, &dma->regs->timeout);
	msleep(1000);
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	return 0;
}
0688
0689 static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
0690 {
0691 struct netup_dma *dma;
0692
0693 if (num < 0 || num > 1)
0694 return;
0695 dev_dbg(&ndev->pci_dev->dev, "%s(): num %d\n", __func__, num);
0696 dma = &ndev->dma[num];
0697 netup_unidvb_dma_enable(dma, 0);
0698 msleep(50);
0699 cancel_work_sync(&dma->work);
0700 del_timer(&dma->timeout);
0701 }
0702
0703 static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
0704 {
0705 int res;
0706
0707 res = netup_unidvb_dma_init(ndev, 0);
0708 if (res)
0709 return res;
0710 res = netup_unidvb_dma_init(ndev, 1);
0711 if (res) {
0712 netup_unidvb_dma_fini(ndev, 0);
0713 return res;
0714 }
0715 netup_unidvb_dma_enable(&ndev->dma[0], 0);
0716 netup_unidvb_dma_enable(&ndev->dma[1], 0);
0717 return 0;
0718 }
0719
0720 static int netup_unidvb_ci_setup(struct netup_unidvb_dev *ndev,
0721 struct pci_dev *pci_dev)
0722 {
0723 int res;
0724
0725 writew(NETUP_UNIDVB_IRQ_CI, ndev->bmmio0 + REG_IMASK_SET);
0726 res = netup_unidvb_ci_register(ndev, 0, pci_dev);
0727 if (res)
0728 return res;
0729 res = netup_unidvb_ci_register(ndev, 1, pci_dev);
0730 if (res)
0731 netup_unidvb_ci_unregister(ndev, 0);
0732 return res;
0733 }
0734
0735 static int netup_unidvb_request_mmio(struct pci_dev *pci_dev)
0736 {
0737 if (!request_mem_region(pci_resource_start(pci_dev, 0),
0738 pci_resource_len(pci_dev, 0), NETUP_UNIDVB_NAME)) {
0739 dev_err(&pci_dev->dev,
0740 "%s(): unable to request MMIO bar 0 at 0x%llx\n",
0741 __func__,
0742 (unsigned long long)pci_resource_start(pci_dev, 0));
0743 return -EBUSY;
0744 }
0745 if (!request_mem_region(pci_resource_start(pci_dev, 1),
0746 pci_resource_len(pci_dev, 1), NETUP_UNIDVB_NAME)) {
0747 dev_err(&pci_dev->dev,
0748 "%s(): unable to request MMIO bar 1 at 0x%llx\n",
0749 __func__,
0750 (unsigned long long)pci_resource_start(pci_dev, 1));
0751 release_mem_region(pci_resource_start(pci_dev, 0),
0752 pci_resource_len(pci_dev, 0));
0753 return -EBUSY;
0754 }
0755 return 0;
0756 }
0757
0758 static int netup_unidvb_request_modules(struct device *dev)
0759 {
0760 static const char * const modules[] = {
0761 "lnbh25", "ascot2e", "horus3a", "cxd2841er", "helene", NULL
0762 };
0763 const char * const *curr_mod = modules;
0764 int err;
0765
0766 while (*curr_mod != NULL) {
0767 err = request_module(*curr_mod);
0768 if (err) {
0769 dev_warn(dev, "request_module(%s) failed: %d\n",
0770 *curr_mod, err);
0771 }
0772 ++curr_mod;
0773 }
0774 return 0;
0775 }
0776
/*
 * netup_unidvb_initdev - PCI probe callback.
 *
 * Enables the device, maps both MMIO BARs, installs the shared IRQ
 * handler, allocates the coherent DMA ring area and brings up the SPI,
 * I2C, DVB, CI and DMA sub-drivers. On a card with old firmware only
 * the SPI flash interface is set up, so the user can upgrade. All
 * acquired resources are unwound through the goto chain on failure.
 */
static int netup_unidvb_initdev(struct pci_dev *pci_dev,
				const struct pci_device_id *pci_id)
{
	u8 board_revision;
	u16 board_vendor;
	struct netup_unidvb_dev *ndev;
	int old_firmware = 0;

	netup_unidvb_request_modules(&pci_dev->dev);

	/* check the card's firmware revision */
	if (pci_dev->revision != NETUP_PCI_DEV_REVISION) {
		dev_err(&pci_dev->dev,
			"netup_unidvb: expected card revision %d, got %d\n",
			NETUP_PCI_DEV_REVISION, pci_dev->revision);
		dev_err(&pci_dev->dev,
			"Please upgrade firmware!\n");
		dev_err(&pci_dev->dev,
			"Instructions on http://www.netup.tv\n");
		old_firmware = 1;
		/* force the SPI interface on so the flash can be updated */
		spi_enable = 1;
	}

	/* allocate the device context */
	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto dev_alloc_err;

	/* hardware revision is derived from the PCI device id */
	if (pci_dev->device == NETUP_HW_REV_1_3)
		ndev->rev = NETUP_HW_REV_1_3;
	else
		ndev->rev = NETUP_HW_REV_1_4;

	dev_info(&pci_dev->dev,
		"%s(): board (0x%x) hardware revision 0x%x\n",
		__func__, pci_dev->device, ndev->rev);

	ndev->old_fw = old_firmware;
	ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
	if (!ndev->wq) {
		dev_err(&pci_dev->dev,
			"%s(): unable to create workqueue\n", __func__);
		goto wq_create_err;
	}
	ndev->pci_dev = pci_dev;
	ndev->pci_bus = pci_dev->bus->number;
	ndev->pci_slot = PCI_SLOT(pci_dev->devfn);
	ndev->pci_func = PCI_FUNC(pci_dev->devfn);
	ndev->board_num = ndev->pci_bus*10 + ndev->pci_slot;
	pci_set_drvdata(pci_dev, ndev);

	dev_info(&pci_dev->dev, "%s(): PCI device (%d). Bus:0x%x Slot:0x%x\n",
		__func__, ndev->board_num, ndev->pci_bus, ndev->pci_slot);

	if (pci_enable_device(pci_dev)) {
		dev_err(&pci_dev->dev, "%s(): pci_enable_device failed\n",
			__func__);
		goto pci_enable_err;
	}
	/* sanity-check the vendor ID from config space */
	pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &board_revision);
	pci_read_config_word(pci_dev, PCI_VENDOR_ID, &board_vendor);
	if (board_vendor != NETUP_VENDOR_ID) {
		dev_err(&pci_dev->dev, "%s(): unknown board vendor 0x%x",
			__func__, board_vendor);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev,
		"%s(): board vendor 0x%x, revision 0x%x\n",
		__func__, board_vendor, board_revision);
	pci_set_master(pci_dev);
	if (dma_set_mask(&pci_dev->dev, 0xffffffff) < 0) {
		dev_err(&pci_dev->dev,
			"%s(): 32bit PCI DMA is not supported\n", __func__);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev, "%s(): using 32bit PCI DMA\n", __func__);
	/* clear max-read-request, relaxed-ordering and no-snoop bits */
	pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL,
		PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
		PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
	/* adjust the PCIe completion timeout range */
	pcie_capability_clear_and_set_word(pci_dev,
		PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0x2);

	if (netup_unidvb_request_mmio(pci_dev)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO regions\n", __func__);
		goto pci_detect_err;
	}
	ndev->lmmio0 = ioremap(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	if (!ndev->lmmio0) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 0\n", __func__);
		goto pci_bar0_error;
	}
	ndev->lmmio1 = ioremap(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	if (!ndev->lmmio1) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 1\n", __func__);
		goto pci_bar1_error;
	}
	/* byte-access aliases of the two BAR mappings */
	ndev->bmmio0 = (u8 __iomem *)ndev->lmmio0;
	ndev->bmmio1 = (u8 __iomem *)ndev->lmmio1;
	dev_info(&pci_dev->dev,
		"%s(): PCI MMIO at 0x%p (%d); 0x%p (%d); IRQ %d",
		__func__,
		ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
		ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
		pci_dev->irq);
	if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
			"netup_unidvb", pci_dev) < 0) {
		dev_err(&pci_dev->dev,
			"%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
		goto irq_request_err;
	}
	/* one coherent area holding both channels' rings */
	ndev->dma_size = 2 * 188 *
		NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
	ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
		ndev->dma_size, &ndev->dma_phys, GFP_KERNEL);
	if (!ndev->dma_virt) {
		dev_err(&pci_dev->dev, "%s(): unable to allocate DMA buffer\n",
			__func__);
		goto dma_alloc_err;
	}
	netup_unidvb_dev_enable(ndev);
	if (spi_enable && netup_spi_init(ndev)) {
		dev_warn(&pci_dev->dev,
			"netup_unidvb: SPI flash setup failed\n");
		goto spi_setup_err;
	}
	if (old_firmware) {
		/* stop after the SPI interface so the user can reflash */
		dev_err(&pci_dev->dev,
			"netup_unidvb: card initialization was incomplete\n");
		return 0;
	}
	if (netup_i2c_register(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: I2C setup failed\n");
		goto i2c_setup_err;
	}
	/* unmask both I2C interrupts */
	writew(NETUP_UNIDVB_IRQ_I2C0 | NETUP_UNIDVB_IRQ_I2C1,
		ndev->bmmio0 + REG_IMASK_SET);
	usleep_range(5000, 10000);
	if (netup_unidvb_dvb_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DVB setup failed\n");
		goto dvb_setup_err;
	}
	if (netup_unidvb_ci_setup(ndev, pci_dev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: CI setup failed\n");
		goto ci_setup_err;
	}
	if (netup_unidvb_dma_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
		goto dma_setup_err;
	}
	dev_info(&pci_dev->dev,
		"netup_unidvb: device has been initialized\n");
	return 0;
	/* error unwind: each label releases what was acquired before it */
dma_setup_err:
	netup_unidvb_ci_unregister(ndev, 0);
	netup_unidvb_ci_unregister(ndev, 1);
ci_setup_err:
	netup_unidvb_dvb_fini(ndev, 0);
	netup_unidvb_dvb_fini(ndev, 1);
dvb_setup_err:
	netup_i2c_unregister(ndev);
i2c_setup_err:
	if (ndev->spi)
		netup_spi_release(ndev);
spi_setup_err:
	dma_free_coherent(&pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
dma_alloc_err:
	free_irq(pci_dev->irq, pci_dev);
irq_request_err:
	iounmap(ndev->lmmio1);
pci_bar1_error:
	iounmap(ndev->lmmio0);
pci_bar0_error:
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
pci_detect_err:
	pci_disable_device(pci_dev);
pci_enable_err:
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
wq_create_err:
	kfree(ndev);
dev_alloc_err:
	dev_err(&pci_dev->dev,
		"%s(): failed to initialize device\n", __func__);
	return -EIO;
}
0976
/*
 * netup_unidvb_finidev - PCI remove callback; tears down everything
 * netup_unidvb_initdev() set up, in reverse order. For old-firmware
 * cards only the SPI interface was brought up, so the DMA/CI/DVB/I2C
 * teardown is skipped.
 */
static void netup_unidvb_finidev(struct pci_dev *pci_dev)
{
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);

	dev_info(&pci_dev->dev, "%s(): trying to stop device\n", __func__);
	if (!ndev->old_fw) {
		netup_unidvb_dma_fini(ndev, 0);
		netup_unidvb_dma_fini(ndev, 1);
		netup_unidvb_ci_unregister(ndev, 0);
		netup_unidvb_ci_unregister(ndev, 1);
		netup_unidvb_dvb_fini(ndev, 0);
		netup_unidvb_dvb_fini(ndev, 1);
		netup_i2c_unregister(ndev);
	}
	if (ndev->spi)
		netup_spi_release(ndev);
	/* mask all board interrupts before releasing resources */
	writew(0xffff, ndev->bmmio0 + REG_IMASK_CLEAR);
	dma_free_coherent(&ndev->pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
	free_irq(pci_dev->irq, pci_dev);
	iounmap(ndev->lmmio0);
	iounmap(ndev->lmmio1);
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	pci_disable_device(pci_dev);
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
	kfree(ndev);
	dev_info(&pci_dev->dev,
		"%s(): device has been successfully stopped\n", __func__);
}
1010
1011
/* Supported boards: two NetUP device IDs; the device ID also selects
 * the hardware revision in netup_unidvb_initdev(). */
static const struct pci_device_id netup_unidvb_pci_tbl[] = {
	{ PCI_DEVICE(0x1b55, 0x18f6) },
	{ PCI_DEVICE(0x1b55, 0x18f7) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, netup_unidvb_pci_tbl);

static struct pci_driver netup_unidvb_pci_driver = {
	.name = "netup_unidvb",
	.id_table = netup_unidvb_pci_tbl,
	.probe = netup_unidvb_initdev,
	.remove = netup_unidvb_finidev,
};

module_pci_driver(netup_unidvb_pci_driver);