/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */

#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm_device.h>
#include <drm/via_drm.h>

#include "via_dmablit.h"
#include "via_drv.h"

#define VIA_PGDN(x)	(((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)	(((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)	((unsigned long)(x) >> PAGE_SHIFT)

typedef struct _drm_via_descriptor {
	uint32_t mem_addr;
	uint32_t dev_addr;
	uint32_t size;
	uint32_t next;
} drm_via_descriptor_t;

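/*
 * Unmap the DMA pages and descriptor mappings of a blit. The descriptor
 * chain is walked backwards, from the last descriptor set up by
 * via_map_blit_for_device() down to the first.
 */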
static void
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
		descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

	while (num_desc--) {
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
				descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		desc_ptr--;
	}
}

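/*
 * If mode == 0, count how many descriptors are needed for the blit.
 * If mode == 1, map the DMA pages for the device and set up and map the
 * descriptors as well. The descriptor chain is linked through the 'next'
 * fields in reverse build order, so the hardware consumes it starting
 * from vsg->chain_start.
 */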
static void
via_map_blit_for_device(struct pci_dev *pdev,
			const drm_via_dmablit_t *xfer,
			drm_via_sg_info_t *vsg,
			int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			remaining_len = min(PAGE_SIZE - VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
					dma_map_page(&pdev->dev,
						     vsg->pages[VIA_PFN(cur_mem) -
								VIA_PFN(first_addr)],
						     VIA_PGOFF(cur_mem), remaining_len,
						     vsg->direction);
				desc_ptr->dev_addr = cur_fb;

				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
						      DMA_TO_DEVICE);
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}

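/*
 * Free up all resources for a blit. It is usable even if the blit info
 * has only been partially built, as long as the status enum is consistent
 * with the actual status of the used resources.
 */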
static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int i;

	switch (vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
		fallthrough;
	case dr_via_desc_pages_alloc:
		for (i = 0; i < vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
		fallthrough;
	case dr_via_pages_locked:
		unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
					    (vsg->direction == DMA_FROM_DEVICE));
		fallthrough;
	case dr_via_pages_alloc:
		vfree(vsg->pages);
		fallthrough;
	default:
		vsg->state = dr_via_sg_init;
	}
	vfree(vsg->bounce_buffer);
	vsg->bounce_buffer = NULL;
	vsg->free_on_sequence = 0;
}

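/*
 * Fire a blit engine.
 */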
static void
via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
		  VIA_DMA_CSR_DE);
	via_write(dev_priv, VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	via_write(dev_priv, VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	wmb();
	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
	via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04);
}

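/*
 * Pin all user-space pages backing the blit, so that they can be DMA-mapped.
 */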
static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);

	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
		first_pfn + 1;

	vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
	if (NULL == vsg->pages)
		return -ENOMEM;
	ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
				  vsg->num_pages,
				  vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
				  vsg->pages);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return -EINVAL;
	}
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}

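/*
 * Allocate DMA-capable memory for the blit descriptor chain, and an array
 * that keeps track of the pages we allocate. We don't want to use kmalloc
 * for the descriptor chain because it may be quite large for some blits,
 * and pages don't need to be contiguous.
 */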
static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
	int i;

	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
		vsg->descriptors_per_page;

	if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
		return -ENOMEM;

	vsg->state = dr_via_desc_pages_alloc;
	for (i = 0; i < vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return -ENOMEM;
	}
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
		  vsg->num_desc);
	return 0;
}

static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}

static void
via_dmablit_engine_off(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}

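/*
 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things
 * here. The rest, like unmapping and freeing memory for done blits, is done in
 * a separate workqueue task. Basically the task of the interrupt handler is to
 * submit a new blit to the engine, while the workqueue task takes care of
 * processing associated with the old blit.
 */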
void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave = 0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	if (from_irq)
		spin_lock(&blitq->blit_lock);
	else
		spin_lock_irqsave(&blitq->blit_lock, irqsave);

	done_transfer = blitq->is_active &&
		((status = via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		wake_up(blitq->blit_queue + cur);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

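		/*
		 * Clear transfer done flag.
		 */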
		via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

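		/*
		 * Abort a transfer that has stalled: the engine was fired more
		 * than a second ago (blitq->end) without signalling completion.
		 */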
		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + HZ;
			if (!timer_pending(&blitq->poll_timer))
				mod_timer(&blitq->poll_timer, jiffies + 1);
		} else {
			if (timer_pending(&blitq->poll_timer))
				del_timer(&blitq->poll_timer);
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq)
		spin_unlock(&blitq->blit_lock);
	else
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}

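/*
 * Check whether this blit is still active, performing necessary locking.
 */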
static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
	unsigned long irqsave;
	uint32_t slot;
	int active;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

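	/*
	 * Allow for handle wraparounds.
	 */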
	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
		if (slot >= VIA_NUM_BLIT_SLOTS)
			slot -= VIA_NUM_BLIT_SLOTS;
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}

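/*
 * Sync. Wait for at most three seconds for the blit to be performed.
 */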
static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		VIA_WAIT_ON(ret, *queue, 3 * HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);

	return ret;
}

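/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware has
 * IRQs, they will shorten the latency somewhat.
 */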
static void
via_dmablit_timer(struct timer_list *t)
{
	drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
	struct drm_device *dev = blitq->dev;
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		mod_timer(&blitq->poll_timer, jiffies + 1);

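		/*
		 * Rerun handler to delete timer if engines are off, and
		 * to shorten abort latency. This is a little nasty.
		 */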
		via_dmablit_handler(dev, engine, 0);
	}
}

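/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 */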
static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
	struct drm_device *dev = blitq->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;

	DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while (blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		wake_up(&blitq->busy_queue);

		via_free_sg_info(pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}

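/*
 * Init all blit engines. Currently we use two, but some hardware have 4.
 */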
void
via_init_dmablit(struct drm_device *dev)
{
	int i, j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	drm_via_blitq_t *blitq;

	pci_set_master(pdev);

	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
			init_waitqueue_head(blitq->blit_queue + j);
		init_waitqueue_head(&blitq->busy_queue);
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
	}
}

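/*
 * Build all info and do all mappings required for a blit.
 */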
static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;
	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return -EINVAL;
	}

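	/*
	 * Below check is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */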
	if ((xfer->mem_stride - xfer->line_length) > 2 * PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, Length: %d\n",
			  xfer->mem_stride, xfer->line_length);
		return -EINVAL;
	}

	if ((xfer->mem_stride == xfer->line_length) &&
	    (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

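	/*
	 * Don't lock an arbitrary large number of pages, since that causes a
	 * DOS security hole.
	 */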
	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

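	/*
	 * We allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */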
	if (xfer->mem_stride < xfer->line_length ||
	    abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return -EINVAL;
	}

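	/*
	 * A hardware bug seems to be worked around if system memory addresses
	 * start on 16-byte boundaries. This is stricter than the 4-byte
	 * alignment that the VIA_BUGFREE variant below checks for.
	 */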
#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	     ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) &&
	     ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(pdev, vsg);
		return ret;
	}

	via_map_blit_for_device(pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(pdev, xfer, vsg, 1);

	return 0;
}

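/*
 * Reserve one free slot in the blit queue. Will wait for one slot to become
 * available. Can sleep, so it may only be called from a sleepable context.
 */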
static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret = 0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
		if (ret)
			return (-EINTR == ret) ? -EAGAIN : ret;

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}

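/*
 * Hand back a free slot if we changed our mind.
 */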
static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	blitq->num_free++;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	wake_up(&blitq->busy_queue);
}

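/*
 * Grab a free slot. Build blit info and queue a blit.
 */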
static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return -EINVAL;
	}

	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
		return ret;
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return -ENOMEM;
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}

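/*
 * Sync on a previously submitted blit. Note that the X server uses signals
 * extensively, and there is a very big probability that this IOCTL will be
 * interrupted by a signal. In that case it returns with -EAGAIN for the
 * signal to be delivered. The caller should then reissue the IOCTL.
 */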
int
via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_blitsync_t *sync = data;
	int err;

	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
		return -EINVAL;

	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);

	if (-EINTR == err)
		err = -EAGAIN;

	return err;
}

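/*
 * Queue a blit, returning in xfer->sync a handle that can later be passed
 * to via_dma_blit_sync() for synchronization.
 */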
int
via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_dmablit_t *xfer = data;
	int err;

	err = via_dmablit(dev, xfer);

	return err;
}