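/*
 * DMA support for the legacy Matrox MGA DRM driver: primary DMA stream
 * management, secondary buffer freelist, and AGP/PCI DMA bootstrap.
 */
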
#include <linux/delay.h>

#include "mga_drv.h"

#define MGA_DEFAULT_USEC_TIMEOUT 10000
#define MGA_FREELIST_DEBUG 0

#define MINIMAL_CLEANUP 0
#define FULL_CLEANUP 1
static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);

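/*
 * Busy-wait until the engine-idle status bits indicate that primary DMA
 * has completed, giving up after dev_priv->usec_timeout microseconds.
 */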
int mga_do_wait_for_idle(drm_mga_private_t *dev_priv)
{
	u32 status = 0;
	int i;
	DRM_DEBUG("\n");

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
		if (status == MGA_ENDPRDMASTS) {
			MGA_WRITE8(MGA_CRTC_INDEX, 0);
			return 0;
		}
		udelay(1);
	}

#if MGA_DMA_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO(" status=0x%08x\n", status);
#endif
	return -EBUSY;
}

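/*
 * Reset the software state of the primary DMA stream.  This only resets the
 * driver's bookkeeping (tail, free space, last flush/wrap); it does not touch
 * the hardware.
 */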
static int mga_do_dma_reset(drm_mga_private_t *dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_primary_buffer_t *primary = &dev_priv->prim;

	DRM_DEBUG("\n");

	primary->tail = 0;
	primary->space = primary->size;
	primary->last_flush = 0;

	sarea_priv->last_wrap = 0;

	return 0;
}

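/*
 * Flush the pending portion of the primary DMA stream: wait (briefly) for the
 * engine to go idle, emit one block of MGA_DMAPAD writes to pad the stream,
 * recompute the free space from the hardware head pointer, and kick off the
 * transfer by writing the new tail to MGA_PRIMEND.
 */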
void mga_do_dma_flush(drm_mga_private_t *dev_priv)
{
	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
	u32 head, tail;
	u32 status = 0;
	int i;
	DMA_LOCALS;
	DRM_DEBUG("\n");

	/* Wait for the engine to go idle so the flush is safe. */
	for (i = 0; i < dev_priv->usec_timeout; i++) {
		status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
		if (status == MGA_ENDPRDMASTS)
			break;
		udelay(1);
	}

	if (primary->tail == primary->last_flush) {
		DRM_DEBUG(" bailing out...\n");
		return;
	}

	tail = primary->tail + dev_priv->primary->offset;

	/* Pad the stream with a block of no-op (MGA_DMAPAD) register writes. */
	BEGIN_DMA(1);

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

	ADVANCE_DMA();

	primary->last_flush = primary->tail;

	head = MGA_READ(MGA_PRIMADDRESS);

	if (head <= tail)
		primary->space = primary->size - primary->tail;
	else
		primary->space = head - tail;

	DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
	DRM_DEBUG(" tail = 0x%06lx\n", (unsigned long)(tail - dev_priv->primary->offset));
	DRM_DEBUG(" space = 0x%06x\n", primary->space);

	mga_flush_write_combine();
	MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);

	DRM_DEBUG("done.\n");
}

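/*
 * Begin wrapping the primary DMA stream back to the start of the buffer:
 * pad out the current block, reset the software tail, bump the wrap count,
 * let the hardware run up to the old tail by writing MGA_PRIMEND, and mark
 * the stream as wrapped.  mga_do_dma_wrap_end() finishes the wrap.
 */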
void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv)
{
	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
	u32 head, tail;
	DMA_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_DMA_WRAP();

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

	ADVANCE_DMA();

	tail = primary->tail + dev_priv->primary->offset;

	primary->tail = 0;
	primary->last_flush = 0;
	primary->last_wrap++;

	head = MGA_READ(MGA_PRIMADDRESS);

	if (head == dev_priv->primary->offset)
		primary->space = primary->size;
	else
		primary->space = head - dev_priv->primary->offset;

	DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
	DRM_DEBUG(" tail = 0x%06x\n", primary->tail);
	DRM_DEBUG(" wrap = %d\n", primary->last_wrap);
	DRM_DEBUG(" space = 0x%06x\n", primary->space);

	mga_flush_write_combine();
	MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);

	set_bit(0, &primary->wrapped);
	DRM_DEBUG("done.\n");
}

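/*
 * Finish a primary DMA stream wrap: point MGA_PRIMADDRESS back at the start
 * of the buffer, bump the shared-area wrap count, and clear the wrapped flag.
 */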
void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv)
{
	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	u32 head = dev_priv->primary->offset;
	DRM_DEBUG("\n");

	sarea_priv->last_wrap++;
	DRM_DEBUG(" wrap = %d\n", sarea_priv->last_wrap);

	mga_flush_write_combine();
	MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL);

	clear_bit(0, &primary->wrapped);
	DRM_DEBUG("done.\n");
}

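/*
 * Freelist management for the secondary DMA buffers.  Each buffer carries an
 * age (a primary-stream head position plus wrap count); MGA_BUFFER_FREE and
 * MGA_BUFFER_USED are special age values marking buffers that are idle or
 * currently in use.
 */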
#define MGA_BUFFER_USED (~0)
#define MGA_BUFFER_FREE 0

#if MGA_FREELIST_DEBUG
static void mga_freelist_print(struct drm_device *dev)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_freelist_t *entry;

	DRM_INFO("\n");
	DRM_INFO("current dispatch: last=0x%x done=0x%x\n",
		 dev_priv->sarea_priv->last_dispatch,
		 (unsigned int)(MGA_READ(MGA_PRIMADDRESS) -
				dev_priv->primary->offset));
	DRM_INFO("current freelist:\n");

	for (entry = dev_priv->head->next; entry; entry = entry->next) {
		DRM_INFO(" %p idx=%2d age=0x%x 0x%06lx\n",
			 entry, entry->buf->idx, entry->age.head,
			 (unsigned long)(entry->age.head - dev_priv->primary->offset));
	}
	DRM_INFO("\n");
}
#endif

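/*
 * Build the freelist: a dummy head node plus one node per secondary DMA
 * buffer, each buffer node marked MGA_BUFFER_FREE and linked to its drm_buf.
 */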
static int mga_freelist_init(struct drm_device *dev, drm_mga_private_t *dev_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_mga_buf_priv_t *buf_priv;
	drm_mga_freelist_t *entry;
	int i;
	DRM_DEBUG("count=%d\n", dma->buf_count);

	dev_priv->head = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
	if (dev_priv->head == NULL)
		return -ENOMEM;

	SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);

	for (i = 0; i < dma->buf_count; i++) {
		buf = dma->buflist[i];
		buf_priv = buf->dev_private;

		entry = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
		if (entry == NULL)
			return -ENOMEM;

		entry->next = dev_priv->head->next;
		entry->prev = dev_priv->head;
		SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
		entry->buf = buf;

		if (dev_priv->head->next != NULL)
			dev_priv->head->next->prev = entry;
		if (entry->next == NULL)
			dev_priv->tail = entry;

		buf_priv->list_entry = entry;
		buf_priv->discard = 0;
		buf_priv->dispatched = 0;

		dev_priv->head->next = entry;
	}

	return 0;
}

static void mga_freelist_cleanup(struct drm_device *dev)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_freelist_t *entry;
	drm_mga_freelist_t *next;
	DRM_DEBUG("\n");

	entry = dev_priv->head;
	while (entry) {
		next = entry->next;
		kfree(entry);
		entry = next;
	}

	dev_priv->head = dev_priv->tail = NULL;
}

#if 0
/* Unused: reset every secondary buffer's age back to MGA_BUFFER_FREE. */
static void mga_freelist_reset(struct drm_device *dev)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_mga_buf_priv_t *buf_priv;
	int i;

	for (i = 0; i < dma->buf_count; i++) {
		buf = dma->buflist[i];
		buf_priv = buf->dev_private;
		SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0);
	}
}
#endif

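/*
 * Take the buffer at the tail of the freelist, but only if TEST_AGE()
 * indicates the buffer can be reclaimed given the current primary-stream
 * head and wrap count; otherwise return NULL and let the caller retry.
 */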
static struct drm_buf *mga_freelist_get(struct drm_device *dev)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_freelist_t *next;
	drm_mga_freelist_t *prev;
	drm_mga_freelist_t *tail = dev_priv->tail;
	u32 head, wrap;
	DRM_DEBUG("\n");

	head = MGA_READ(MGA_PRIMADDRESS);
	wrap = dev_priv->sarea_priv->last_wrap;

	DRM_DEBUG(" tail=0x%06lx %d\n",
		  tail->age.head ?
		  (unsigned long)(tail->age.head - dev_priv->primary->offset) : 0,
		  tail->age.wrap);
	DRM_DEBUG(" head=0x%06lx %d\n",
		  (unsigned long)(head - dev_priv->primary->offset), wrap);

	if (TEST_AGE(&tail->age, head, wrap)) {
		prev = dev_priv->tail->prev;
		next = dev_priv->tail;
		prev->next = NULL;
		next->prev = next->next = NULL;
		dev_priv->tail = prev;
		SET_AGE(&next->age, MGA_BUFFER_USED, 0);
		return next->buf;
	}

	DRM_DEBUG("returning NULL!\n");
	return NULL;
}

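/*
 * Return a buffer to the freelist.  A buffer still marked MGA_BUFFER_USED
 * (never aged by a dispatch) is put back on the tail as free; otherwise it
 * is re-linked just behind the head, keeping its age.
 */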
int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
	drm_mga_freelist_t *head, *entry, *prev;

	DRM_DEBUG("age=0x%06lx wrap=%d\n",
		  (unsigned long)(buf_priv->list_entry->age.head -
				  dev_priv->primary->offset),
		  buf_priv->list_entry->age.wrap);

	entry = buf_priv->list_entry;
	head = dev_priv->head;

	if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) {
		SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
		prev = dev_priv->tail;
		prev->next = entry;
		entry->prev = prev;
		entry->next = NULL;
	} else {
		prev = head->next;
		head->next = entry;
		prev->prev = entry;
		entry->prev = head;
		entry->next = prev;
	}

	return 0;
}

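/*
 * Driver load: allocate the per-device private structure, record the MMIO
 * BAR, enable PCI bus mastering, and set up vblank support.  For device
 * 0x0525 cards sitting behind a 0x3388:0x0021 PCI-to-PCI bridge the AGP
 * state set up by the DRM core is discarded, so they are driven as plain
 * PCI devices.
 */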
int mga_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	drm_mga_private_t *dev_priv;
	int ret;

	if ((pdev->device == 0x0525) && pdev->bus->self
	    && (pdev->bus->self->vendor == 0x3388)
	    && (pdev->bus->self->device == 0x0021)
	    && dev->agp) {
		arch_phys_wc_del(dev->agp->agp_mtrr);
		kfree(dev->agp);
		dev->agp = NULL;
	}

	dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL);
	if (!dev_priv)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;

	dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
	dev_priv->chipset = flags;

	pci_set_master(pdev);

	dev_priv->mmio_base = pci_resource_start(pdev, 1);
	dev_priv->mmio_size = pci_resource_len(pdev, 1);

	ret = drm_vblank_init(dev, 1);
	if (ret) {
		mga_driver_unload(dev);
		return ret;
	}

	return 0;
}

#if IS_ENABLED(CONFIG_AGP)
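/*
 * Bootstrap the driver for AGP DMA: acquire and enable AGP at the requested
 * mode, allocate and bind the AGP aperture, carve it up into maps for the
 * WARP microcode, the primary DMA stream, the secondary buffers and the
 * texture heap, then ioremap everything and add the secondary buffers to the
 * DRM buffer list.
 */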
static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
				    drm_mga_dma_bootstrap_t *dma_bs)
{
	drm_mga_private_t *const dev_priv =
	    (drm_mga_private_t *) dev->dev_private;
	unsigned int warp_size = MGA_WARP_UCODE_SIZE;
	int err;
	unsigned offset;
	const unsigned secondary_size = dma_bs->secondary_bin_count
	    * dma_bs->secondary_bin_size;
	const unsigned agp_size = (dma_bs->agp_size << 20);
	struct drm_buf_desc req;
	struct drm_agp_mode mode;
	struct drm_agp_info info;
	struct drm_agp_buffer agp_req;
	struct drm_agp_binding bind_req;

	/* Acquire AGP and enable the mode requested by userspace. */
	err = drm_legacy_agp_acquire(dev);
	if (err) {
		DRM_ERROR("Unable to acquire AGP: %d\n", err);
		return err;
	}

	err = drm_legacy_agp_info(dev, &info);
	if (err) {
		DRM_ERROR("Unable to get AGP info: %d\n", err);
		return err;
	}

	mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
	err = drm_legacy_agp_enable(dev, mode);
	if (err) {
		DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
		return err;
	}

	/* On the G200, enable or disable the AGP 2x PLL to match the
	 * negotiated mode.
	 */
	if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
		if (mode.mode & 0x02)
			MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
		else
			MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
	}

	/* Allocate and bind the AGP aperture. */
	agp_req.size = agp_size;
	agp_req.type = 0;
	err = drm_legacy_agp_alloc(dev, &agp_req);
	if (err) {
		dev_priv->agp_size = 0;
		DRM_ERROR("Unable to allocate %uMB AGP memory\n",
			  dma_bs->agp_size);
		return err;
	}

	dev_priv->agp_size = agp_size;
	dev_priv->agp_handle = agp_req.handle;

	bind_req.handle = agp_req.handle;
	bind_req.offset = 0;
	err = drm_legacy_agp_bind(dev, &bind_req);
	if (err) {
		DRM_ERROR("Unable to bind AGP memory: %d\n", err);
		return err;
	}

	/* Carve the aperture into the WARP microcode, primary DMA,
	 * secondary buffer and texture regions.
	 */
	if (warp_size < PAGE_SIZE)
		warp_size = PAGE_SIZE;

	offset = 0;
	err = drm_legacy_addmap(dev, offset, warp_size,
				_DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
	if (err) {
		DRM_ERROR("Unable to map WARP microcode: %d\n", err);
		return err;
	}

	offset += warp_size;
	err = drm_legacy_addmap(dev, offset, dma_bs->primary_size,
				_DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
	if (err) {
		DRM_ERROR("Unable to map primary DMA region: %d\n", err);
		return err;
	}

	offset += dma_bs->primary_size;
	err = drm_legacy_addmap(dev, offset, secondary_size,
				_DRM_AGP, 0, &dev->agp_buffer_map);
	if (err) {
		DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
		return err;
	}

	(void)memset(&req, 0, sizeof(req));
	req.count = dma_bs->secondary_bin_count;
	req.size = dma_bs->secondary_bin_size;
	req.flags = _DRM_AGP_BUFFER;
	req.agp_start = offset;

	err = drm_legacy_addbufs_agp(dev, &req);
	if (err) {
		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
		return err;
	}

	{
		struct drm_map_list *_entry;
		unsigned long agp_token = 0;

		list_for_each_entry(_entry, &dev->maplist, head) {
			if (_entry->map == dev->agp_buffer_map)
				agp_token = _entry->user_token;
		}
		if (!agp_token)
			return -EFAULT;

		dev->agp_buffer_token = agp_token;
	}

	offset += secondary_size;
	err = drm_legacy_addmap(dev, offset, agp_size - offset,
				_DRM_AGP, 0, &dev_priv->agp_textures);
	if (err) {
		DRM_ERROR("Unable to map AGP texture region: %d\n", err);
		return err;
	}

	drm_legacy_ioremap(dev_priv->warp, dev);
	drm_legacy_ioremap(dev_priv->primary, dev);
	drm_legacy_ioremap(dev->agp_buffer_map, dev);

	if (!dev_priv->warp->handle ||
	    !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
		DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
			  dev_priv->warp->handle, dev_priv->primary->handle,
			  dev->agp_buffer_map->handle);
		return -ENOMEM;
	}

	dev_priv->dma_access = MGA_PAGPXFER;
	dev_priv->wagp_enable = MGA_WAGP_ENABLE;

	DRM_INFO("Initialized card for AGP DMA.\n");
	return 0;
}
#else
static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
				    drm_mga_dma_bootstrap_t *dma_bs)
{
	return -EINVAL;
}
#endif

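/*
 * Bootstrap the driver for PCI DMA: allocate consistent-memory maps for the
 * WARP microcode and the primary DMA stream (halving the primary size until
 * an allocation succeeds), then add PCI secondary buffers, reducing the bin
 * count until that succeeds as well.
 */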
static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
				    drm_mga_dma_bootstrap_t *dma_bs)
{
	drm_mga_private_t *const dev_priv =
	    (drm_mga_private_t *) dev->dev_private;
	unsigned int warp_size = MGA_WARP_UCODE_SIZE;
	unsigned int primary_size;
	unsigned int bin_count;
	int err;
	struct drm_buf_desc req;

	if (dev->dma == NULL) {
		DRM_ERROR("dev->dma is NULL\n");
		return -EFAULT;
	}

	/* Round the WARP microcode map up to at least one page. */
	if (warp_size < PAGE_SIZE)
		warp_size = PAGE_SIZE;

	err = drm_legacy_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
				_DRM_READ_ONLY, &dev_priv->warp);
	if (err != 0) {
		DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
			  err);
		return err;
	}

	/* Allocate the largest primary DMA region we can get, halving the
	 * requested size until an allocation succeeds.
	 */
	for (primary_size = dma_bs->primary_size; primary_size != 0;
	     primary_size >>= 1) {
		err = drm_legacy_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
					_DRM_READ_ONLY, &dev_priv->primary);
		if (!err)
			break;
	}

	if (err != 0) {
		DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
		return -ENOMEM;
	}

	if (dev_priv->primary->size != dma_bs->primary_size) {
		DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
			 dma_bs->primary_size,
			 (unsigned)dev_priv->primary->size);
		dma_bs->primary_size = dev_priv->primary->size;
	}

	/* Likewise, back off the secondary buffer bin count until the
	 * buffers can be added.
	 */
	for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
	     bin_count--) {
		(void)memset(&req, 0, sizeof(req));
		req.count = bin_count;
		req.size = dma_bs->secondary_bin_size;

		err = drm_legacy_addbufs_pci(dev, &req);
		if (!err)
			break;
	}

	if (bin_count == 0) {
		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
		return err;
	}

	if (bin_count != dma_bs->secondary_bin_count) {
		DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
			 "to %u.\n", dma_bs->secondary_bin_count, bin_count);
		dma_bs->secondary_bin_count = bin_count;
	}

	dev_priv->dma_access = 0;
	dev_priv->wagp_enable = 0;

	dma_bs->agp_mode = 0;

	DRM_INFO("Initialized card for PCI DMA.\n");
	return 0;
}

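/*
 * Common bootstrap entry point: map the MMIO and status regions, then try
 * AGP DMA if it was requested and is available, falling back to PCI DMA if
 * AGP setup fails or was not requested.
 */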
static int mga_do_dma_bootstrap(struct drm_device *dev,
				drm_mga_dma_bootstrap_t *dma_bs)
{
	const int is_agp = (dma_bs->agp_mode != 0) && dev->agp;
	int err;
	drm_mga_private_t *const dev_priv =
	    (drm_mga_private_t *) dev->dev_private;

	dev_priv->used_new_dma_init = 1;

	/* The MMIO and status maps are needed for both AGP and PCI DMA. */
	err = drm_legacy_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
				_DRM_REGISTERS, _DRM_READ_ONLY,
				&dev_priv->mmio);
	if (err) {
		DRM_ERROR("Unable to map MMIO region: %d\n", err);
		return err;
	}

	err = drm_legacy_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
				_DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
				&dev_priv->status);
	if (err) {
		DRM_ERROR("Unable to map status region: %d\n", err);
		return err;
	}

	/* Attempt AGP DMA only if userspace requested a non-zero AGP mode
	 * and the device actually has AGP.
	 */
	if (is_agp)
		err = mga_do_agp_dma_bootstrap(dev, dma_bs);

	/* If the AGP bootstrap failed, undo whatever it managed to set up
	 * before falling back to PCI DMA.
	 */
	if (err)
		mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);

	/* Fall back to (or start with) PCI DMA. */
	if (!is_agp || err)
		err = mga_do_pci_dma_bootstrap(dev, dma_bs);

	return err;
}

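/*
 * Ioctl handler for DMA bootstrap: run the bootstrap, report the texture
 * heap location and size back to userspace, and clamp the reported agp_mode
 * to one of 0, 1, 2 or 4.
 */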
int mga_dma_bootstrap(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	drm_mga_dma_bootstrap_t *bootstrap = data;
	int err;
	static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
	const drm_mga_private_t *const dev_priv =
	    (drm_mga_private_t *) dev->dev_private;

	err = mga_do_dma_bootstrap(dev, bootstrap);
	if (err) {
		mga_do_cleanup_dma(dev, FULL_CLEANUP);
		return err;
	}

	if (dev_priv->agp_textures != NULL) {
		bootstrap->texture_handle = dev_priv->agp_textures->offset;
		bootstrap->texture_size = dev_priv->agp_textures->size;
	} else {
		bootstrap->texture_handle = 0;
		bootstrap->texture_size = 0;
	}

	bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];

	return err;
}

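/*
 * Initialize the DMA state from userspace parameters: cache the framebuffer
 * and depth layout, locate (or, for the old init path, look up and ioremap)
 * the SAREA, MMIO, status, WARP, primary and buffer maps, load the WARP
 * microcode, point the hardware at the primary DMA stream, and build the
 * buffer freelist.
 */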
static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
{
	drm_mga_private_t *dev_priv;
	int ret;
	DRM_DEBUG("\n");

	dev_priv = dev->dev_private;

	if (init->sgram)
		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
	else
		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
	dev_priv->maccess = init->maccess;

	dev_priv->fb_cpp = init->fb_cpp;
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;

	dev_priv->depth_cpp = init->depth_cpp;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	dev_priv->texture_offset = init->texture_offset[0];
	dev_priv->texture_size = init->texture_size[0];

	dev_priv->sarea = drm_legacy_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("failed to find sarea!\n");
		return -EINVAL;
	}

	if (!dev_priv->used_new_dma_init) {
		/* Old-style init: look up the maps by the offsets passed in
		 * from userspace and ioremap them here.
		 */
		dev_priv->dma_access = MGA_PAGPXFER;
		dev_priv->wagp_enable = MGA_WAGP_ENABLE;

		dev_priv->status = drm_legacy_findmap(dev, init->status_offset);
		if (!dev_priv->status) {
			DRM_ERROR("failed to find status page!\n");
			return -EINVAL;
		}
		dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset);
		if (!dev_priv->mmio) {
			DRM_ERROR("failed to find mmio region!\n");
			return -EINVAL;
		}
		dev_priv->warp = drm_legacy_findmap(dev, init->warp_offset);
		if (!dev_priv->warp) {
			DRM_ERROR("failed to find warp microcode region!\n");
			return -EINVAL;
		}
		dev_priv->primary = drm_legacy_findmap(dev, init->primary_offset);
		if (!dev_priv->primary) {
			DRM_ERROR("failed to find primary dma region!\n");
			return -EINVAL;
		}
		dev->agp_buffer_token = init->buffers_offset;
		dev->agp_buffer_map =
		    drm_legacy_findmap(dev, init->buffers_offset);
		if (!dev->agp_buffer_map) {
			DRM_ERROR("failed to find dma buffer region!\n");
			return -EINVAL;
		}

		drm_legacy_ioremap(dev_priv->warp, dev);
		drm_legacy_ioremap(dev_priv->primary, dev);
		drm_legacy_ioremap(dev->agp_buffer_map, dev);
	}

	dev_priv->sarea_priv =
	    (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
				 init->sarea_priv_offset);

	if (!dev_priv->warp->handle ||
	    !dev_priv->primary->handle ||
	    ((dev_priv->dma_access != 0) &&
	     ((dev->agp_buffer_map == NULL) ||
	      (dev->agp_buffer_map->handle == NULL)))) {
		DRM_ERROR("failed to ioremap agp regions!\n");
		return -ENOMEM;
	}

	ret = mga_warp_install_microcode(dev_priv);
	if (ret < 0) {
		DRM_ERROR("failed to install WARP ucode!: %d\n", ret);
		return ret;
	}

	ret = mga_warp_init(dev_priv);
	if (ret < 0) {
		DRM_ERROR("failed to init WARP engine!: %d\n", ret);
		return ret;
	}

	dev_priv->prim.status = (u32 *) dev_priv->status->handle;

	mga_do_wait_for_idle(dev_priv);

	/* Point the hardware at the start of the primary DMA stream. */
	MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
#if 0
	MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 |
		  MGA_PRIMPTREN1);
#endif

	dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
	dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
			      + dev_priv->primary->size);
	dev_priv->prim.size = dev_priv->primary->size;

	dev_priv->prim.tail = 0;
	dev_priv->prim.space = dev_priv->prim.size;
	dev_priv->prim.wrapped = 0;

	dev_priv->prim.last_flush = 0;
	dev_priv->prim.last_wrap = 0;

	dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;

	dev_priv->prim.status[0] = dev_priv->primary->offset;
	dev_priv->prim.status[1] = 0;

	dev_priv->sarea_priv->last_wrap = 0;
	dev_priv->sarea_priv->last_frame.head = 0;
	dev_priv->sarea_priv->last_frame.wrap = 0;

	if (mga_freelist_init(dev, dev_priv) < 0) {
		DRM_ERROR("could not initialize freelist\n");
		return -ENOMEM;
	}

	return 0;
}

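/*
 * Tear down DMA state.  With FULL_CLEANUP the MMIO/status maps and the
 * new-style init flag are dropped as well; MINIMAL_CLEANUP keeps them so a
 * failed AGP bootstrap can fall back to PCI DMA.
 */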
static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
{
	int err = 0;
	DRM_DEBUG("\n");

	/* Make sure interrupts are disabled before the state they rely on
	 * is freed below.
	 */
	if (dev->irq_enabled)
		drm_legacy_irq_uninstall(dev);

	if (dev->dev_private) {
		drm_mga_private_t *dev_priv = dev->dev_private;

		if ((dev_priv->warp != NULL)
		    && (dev_priv->warp->type != _DRM_CONSISTENT))
			drm_legacy_ioremapfree(dev_priv->warp, dev);

		if ((dev_priv->primary != NULL)
		    && (dev_priv->primary->type != _DRM_CONSISTENT))
			drm_legacy_ioremapfree(dev_priv->primary, dev);

		if (dev->agp_buffer_map != NULL)
			drm_legacy_ioremapfree(dev->agp_buffer_map, dev);

		if (dev_priv->used_new_dma_init) {
#if IS_ENABLED(CONFIG_AGP)
			if (dev_priv->agp_handle != 0) {
				struct drm_agp_binding unbind_req;
				struct drm_agp_buffer free_req;

				unbind_req.handle = dev_priv->agp_handle;
				drm_legacy_agp_unbind(dev, &unbind_req);

				free_req.handle = dev_priv->agp_handle;
				drm_legacy_agp_free(dev, &free_req);

				dev_priv->agp_textures = NULL;
				dev_priv->agp_size = 0;
				dev_priv->agp_handle = 0;
			}

			if ((dev->agp != NULL) && dev->agp->acquired)
				err = drm_legacy_agp_release(dev);
#endif
		}

		dev_priv->warp = NULL;
		dev_priv->primary = NULL;
		dev_priv->sarea = NULL;
		dev_priv->sarea_priv = NULL;
		dev->agp_buffer_map = NULL;

		if (full_cleanup) {
			dev_priv->mmio = NULL;
			dev_priv->status = NULL;
			dev_priv->used_new_dma_init = 0;
		}

		memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
		dev_priv->warp_pipe = 0;
		memset(dev_priv->warp_pipe_phys, 0,
		       sizeof(dev_priv->warp_pipe_phys));

		if (dev_priv->head != NULL)
			mga_freelist_cleanup(dev);
	}

	return err;
}

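/*
 * Ioctl handler for DMA init/cleanup.  Requires the DRM lock; dispatches to
 * mga_do_init_dma() or mga_do_cleanup_dma() and performs a full cleanup if
 * initialization fails part-way.
 */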
int mga_dma_init(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	drm_mga_init_t *init = data;
	int err;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	switch (init->func) {
	case MGA_INIT_DMA:
		err = mga_do_init_dma(dev, init);
		if (err)
			(void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
		return err;
	case MGA_CLEANUP_DMA:
		return mga_do_cleanup_dma(dev, FULL_CLEANUP);
	}

	return -EINVAL;
}

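/*
 * Ioctl handler for flushing/idling the primary DMA stream.  FLUSH pushes
 * the pending commands to the hardware; QUIESCENT additionally waits for the
 * engine to go idle.
 */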
int mga_dma_flush(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
	struct drm_lock *lock = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("%s%s%s\n",
		  (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
		  (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
		  (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");

	WRAP_WAIT_WITH_RETURN(dev_priv);

	if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL))
		mga_do_dma_flush(dev_priv);

	if (lock->flags & _DRM_LOCK_QUIESCENT) {
#if MGA_DMA_DEBUG
		int ret = mga_do_wait_for_idle(dev_priv);
		if (ret < 0)
			DRM_INFO("-EBUSY\n");
		return ret;
#else
		return mga_do_wait_for_idle(dev_priv);
#endif
	} else {
		return 0;
	}
}

int mga_dma_reset(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return mga_do_dma_reset(dev_priv);
}

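/*
 * Hand out free secondary DMA buffers to userspace.  mga_dma_get_buffers()
 * fills the request with buffers pulled off the freelist; mga_dma_buffers()
 * is the ioctl wrapper that validates the request first.
 */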
static int mga_dma_get_buffers(struct drm_device *dev,
			       struct drm_file *file_priv, struct drm_dma *d)
{
	struct drm_buf *buf;
	int i;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = mga_freelist_get(dev);
		if (!buf)
			return -EAGAIN;

		buf->file_priv = file_priv;

		if (copy_to_user(&d->request_indices[i],
				 &buf->idx, sizeof(buf->idx)))
			return -EFAULT;
		if (copy_to_user(&d->request_sizes[i],
				 &buf->total, sizeof(buf->total)))
			return -EFAULT;

		d->granted_count++;
	}
	return 0;
}

int mga_dma_buffers(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
	struct drm_dma *d = data;
	int ret = 0;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* This ioctl only hands out buffers; reject attempts to send any. */
	if (d->send_count != 0) {
		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
			  task_pid_nr(current), d->send_count);
		return -EINVAL;
	}

	/* Only grant up to the number of buffers that actually exist. */
	if (d->request_count < 0 || d->request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  task_pid_nr(current), d->request_count,
			  dma->buf_count);
		return -EINVAL;
	}

	WRAP_TEST_WITH_RETURN(dev_priv);

	d->granted_count = 0;

	if (d->request_count)
		ret = mga_dma_get_buffers(dev, file_priv, d);

	return ret;
}

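/*
 * Driver teardown hooks: free the per-device private data on unload, do a
 * full DMA cleanup when the last file handle is closed, and report DMA
 * quiescence by waiting for engine idle.
 */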
void mga_driver_unload(struct drm_device *dev)
{
	kfree(dev->dev_private);
	dev->dev_private = NULL;
}

void mga_driver_lastclose(struct drm_device *dev)
{
	mga_do_cleanup_dma(dev, FULL_CLEANUP);
}

int mga_driver_dma_quiescent(struct drm_device *dev)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	return mga_do_wait_for_idle(dev_priv);
}