/*
 * via_dma.c -- DMA support for the VIA Unichrome/Pro.
 */
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/via_drm.h>

#include "via_drv.h"
#include "via_3d_reg.h"

#define CMDBUF_ALIGNMENT_SIZE	(0x100)
#define CMDBUF_ALIGNMENT_MASK	(0x0ff)

/* defines for VIA 3D registers */
#define VIA_REG_STATUS		0x400
#define VIA_REG_TRANSET		0x43C
#define VIA_REG_TRANSPACE	0x440

/* VIA_REG_STATUS(0x400): Engine Status */
#define VIA_CMD_RGTR_BUSY	0x00000080	/* Command Regulator is busy */
#define VIA_2D_ENG_BUSY		0x00000001	/* 2D Engine is busy */
#define VIA_3D_ENG_BUSY		0x00000002	/* 3D Engine is busy */
#define VIA_VR_QUEUE_BUSY	0x00020000	/* Virtual Queue is busy */

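/*
 * Ring-buffer emission helpers: SetReg2DAGP emits a HALCYON header-1
 * register write pair through the local 'vb' pointer and advances the
 * software ring tail; VIA_OUT_RING_QW emits one raw qword.
 */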
#define SetReg2DAGP(nReg, nData) {				\
	*((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1;	\
	*((uint32_t *)(vb) + 1) = (nData);			\
	vb = ((uint32_t *)vb) + 2;				\
	dev_priv->dma_low += 8;					\
}

#define via_flush_write_combine() mb()

#define VIA_OUT_RING_QW(w1, w2)	do {		\
	*vb++ = (w1);				\
	*vb++ = (w2);				\
	dev_priv->dma_low += 8;			\
} while (0)

static void via_cmdbuf_start(drm_via_private_t *dev_priv);
static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
static int via_wait_idle(drm_via_private_t *dev_priv);
static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);

/*
 * Free space in the command buffer, in bytes.
 */
static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
{
	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;

	return ((hw_addr <= dev_priv->dma_low) ?
		(dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
		(hw_addr - dev_priv->dma_low));
}

/*
 * How far, in bytes, the command regulator lags behind the most recently
 * written data.
 */
static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
{
	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;

	return ((hw_addr <= dev_priv->dma_low) ?
		(dev_priv->dma_low - hw_addr) :
		(dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
}

/*
 * Wait until the command regulator's read pointer has moved far enough
 * that 'size' bytes (plus some slack) can be written at the current
 * position without overtaking it.
 */
static inline int
via_cmdbuf_wait(drm_via_private_t *dev_priv, unsigned int size)
{
	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	uint32_t cur_addr, hw_addr, next_addr;
	volatile uint32_t *hw_addr_ptr;
	uint32_t count;

	hw_addr_ptr = dev_priv->hw_addr_ptr;
	cur_addr = dev_priv->dma_low;
	next_addr = cur_addr + size + 512 * 1024;
	count = 1000000;
	do {
		hw_addr = *hw_addr_ptr - agp_base;
		if (count-- == 0) {
			DRM_ERROR
			    ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
			     hw_addr, cur_addr, next_addr);
			return -1;
		}
		if ((cur_addr < hw_addr) && (next_addr >= hw_addr))
			msleep(1);
	} while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
	return 0;
}

/*
 * Check whether the requested size still fits before the end of the ring
 * buffer and rewind (jump back to the start) if it does not.
 *
 * Returns a virtual pointer into the ring buffer, or NULL if waiting for
 * space timed out.
 */
static inline uint32_t *via_check_dma(drm_via_private_t *dev_priv,
				      unsigned int size)
{
	if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
	    dev_priv->dma_high) {
		via_cmdbuf_rewind(dev_priv);
	}
	if (via_cmdbuf_wait(dev_priv, size) != 0)
		return NULL;

	return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}

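/*
 * Stop the command regulator and release the ioremapped AGP ring buffer,
 * if one has been set up.
 */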
int via_dma_cleanup(struct drm_device *dev)
{
	if (dev->dev_private) {
		drm_via_private_t *dev_priv =
		    (drm_via_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			via_cmdbuf_reset(dev_priv);

			drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
			dev_priv->ring.virtual_start = NULL;
		}
	}

	return 0;
}

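/*
 * Set up the AGP ring buffer described by the userspace init request:
 * map the ring, initialise the software ring state and start the
 * command regulator.
 */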
static int via_initialize(struct drm_device *dev,
			  drm_via_private_t *dev_priv,
			  drm_via_dma_init_t *init)
{
	if (!dev_priv || !dev_priv->mmio) {
		DRM_ERROR("via_dma_init called before via_map_init\n");
		return -EFAULT;
	}

	if (dev_priv->ring.virtual_start != NULL) {
		DRM_ERROR("called again without calling cleanup\n");
		return -EFAULT;
	}

	if (!dev->agp || !dev->agp->base) {
		DRM_ERROR("called with no agp memory available\n");
		return -EFAULT;
	}

	if (dev_priv->chipset == VIA_DX9_0) {
		DRM_ERROR("AGP DMA is not supported on this chip\n");
		return -EINVAL;
	}

	dev_priv->ring.map.offset = dev->agp->base + init->offset;
	dev_priv->ring.map.size = init->size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_legacy_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		via_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->dma_ptr = dev_priv->ring.virtual_start;
	dev_priv->dma_low = 0;
	dev_priv->dma_high = init->size;
	dev_priv->dma_wrap = init->size;
	dev_priv->dma_offset = init->offset;
	dev_priv->last_pause_ptr = NULL;
	dev_priv->hw_addr_ptr =
		(volatile uint32_t *)((char *)dev_priv->mmio->handle +
				      init->reg_pause_addr);

	via_cmdbuf_start(dev_priv);

	return 0;
}

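/*
 * VIA_DMA_INIT ioctl: initialise, tear down or query the AGP ring buffer.
 * Init and cleanup require CAP_SYS_ADMIN.
 */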
static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	drm_via_dma_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case VIA_INIT_DMA:
		if (!capable(CAP_SYS_ADMIN))
			retcode = -EPERM;
		else
			retcode = via_initialize(dev, dev_priv, init);
		break;
	case VIA_CLEANUP_DMA:
		if (!capable(CAP_SYS_ADMIN))
			retcode = -EPERM;
		else
			retcode = via_dma_cleanup(dev);
		break;
	case VIA_DMA_INITIALIZED:
		retcode = (dev_priv->ring.virtual_start != NULL) ?
			0 : -EFAULT;
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

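/*
 * Copy a user command buffer into the kernel staging buffer, verify it,
 * and emit it into the AGP ring buffer.
 */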
static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *cmd)
{
	drm_via_private_t *dev_priv;
	uint32_t *vb;
	int ret;

	dev_priv = (drm_via_private_t *) dev->dev_private;

	if (dev_priv->ring.virtual_start == NULL) {
		DRM_ERROR("called without initializing AGP ring buffer.\n");
		return -EFAULT;
	}

	if (cmd->size > VIA_PCI_BUF_SIZE)
		return -ENOMEM;

	if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
		return -EFAULT;

	/*
	 * Verifying the command stream on AGP memory is dead slow, so it is
	 * run on the cacheable system-memory staging buffer and only copied
	 * to AGP memory once it has passed verification.
	 */
	ret = via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
					cmd->size, dev, 1);
	if (ret)
		return ret;

	vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
	if (vb == NULL)
		return -EAGAIN;

	memcpy(vb, dev_priv->pci_buf, cmd->size);

	dev_priv->dma_low += cmd->size;

	/*
	 * Small submissions somehow stall the CPU (AGP cache effects?),
	 * so pad them out to a larger size.
	 */
	if (cmd->size < 0x100)
		via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
	via_cmdbuf_pause(dev_priv);

	return 0;
}

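/*
 * Wait for the command regulator and the 2D/3D engines to go idle.
 * Returns -EBUSY if the hardware does not become idle in time.
 */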
int via_driver_dma_quiescent(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = dev->dev_private;

	if (!via_wait_idle(dev_priv))
		return -EBUSY;
	return 0;
}

static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return via_driver_dma_quiescent(dev);
}

static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_cmdbuffer_t *cmdbuf = data;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);

	ret = via_dispatch_cmdbuffer(dev, cmdbuf);
	return ret;
}

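/*
 * Verify a user command buffer and feed it directly to the hardware
 * through MMIO (the PCI path) instead of the AGP ring.
 */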
static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
				      drm_via_cmdbuffer_t *cmd)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	int ret;

	if (cmd->size > VIA_PCI_BUF_SIZE)
		return -ENOMEM;
	if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
		return -EFAULT;

	ret = via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
					cmd->size, dev, 0);
	if (ret)
		return ret;

	ret = via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
				       cmd->size);
	return ret;
}

static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_cmdbuffer_t *cmdbuf = data;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);

	ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
	return ret;
}

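/*
 * Fill the ring buffer with the requested number of dummy (no-op) qwords.
 */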
static inline uint32_t *via_align_buffer(drm_via_private_t *dev_priv,
					 uint32_t *vb, int qw_count)
{
	for (; qw_count > 0; --qw_count)
		VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
	return vb;
}

/*
 * Returns a pointer to the current emission position in the ring buffer.
 */
static inline uint32_t *via_get_dma(drm_via_private_t *dev_priv)
{
	return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}

/*
 * Hooks a segment of data into the tail of the ring buffer by modifying
 * the pause address stored in the buffer itself. If the regulator has
 * already paused, restart it.
 */
static int via_hook_segment(drm_via_private_t *dev_priv,
			    uint32_t pause_addr_hi, uint32_t pause_addr_lo,
			    int no_pci_fire)
{
	int paused, count;
	volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
	uint32_t reader, ptr;
	uint32_t diff;

	paused = 0;
	via_flush_write_combine();
	(void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);

	*paused_at = pause_addr_lo;
	via_flush_write_combine();
	(void) *paused_at;

	reader = *(dev_priv->hw_addr_ptr);
	ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
		dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

	dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;

	/*
	 * If there is a chance that the command reader missed the new pause
	 * address and paused on the old one, the new address has to be
	 * programmed through the PCI (MMIO) path below.
	 */
	diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
	count = 10000000;
	while (diff == 0 && count--) {
		paused = (via_read(dev_priv, 0x41c) & 0x80000000);
		if (paused)
			break;
		reader = *(dev_priv->hw_addr_ptr);
		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
	}

	paused = via_read(dev_priv, 0x41c) & 0x80000000;

	if (paused && !no_pci_fire) {
		reader = *(dev_priv->hw_addr_ptr);
		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
		diff &= (dev_priv->dma_high - 1);
		if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
			DRM_ERROR("Paused at incorrect address. "
				  "0x%08x, 0x%08x 0x%08x\n",
				  ptr, reader, dev_priv->dma_diff);
		} else if (diff == 0) {
			/*
			 * The regulator paused on the old address, so restart
			 * it at the new pause address through MMIO.
			 */
			via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
			via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
			via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
			via_read(dev_priv, VIA_REG_TRANSPACE);
		}
	}
	return paused;
}

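/*
 * Busy-wait until the command regulator and the 2D/3D engines report
 * idle, bounded by a fixed poll budget. Returns the remaining budget,
 * so zero means the wait timed out.
 */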
static int via_wait_idle(drm_via_private_t *dev_priv)
{
	int count = 10000000;

	while (!(via_read(dev_priv, VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
		;

	while (count && (via_read(dev_priv, VIA_REG_STATUS) &
			 (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
			  VIA_3D_ENG_BUSY)))
		--count;
	return count;
}

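/*
 * Emit an AGP branch command (pause, jump or stop) padded out to the
 * command-buffer alignment boundary. The encoded address words are
 * returned through cmd_addr_hi and cmd_addr_lo; the return value points
 * just past the emitted command.
 */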
static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
			       uint32_t addr, uint32_t *cmd_addr_hi,
			       uint32_t *cmd_addr_lo, int skip_wait)
{
	uint32_t agp_base;
	uint32_t cmd_addr, addr_lo, addr_hi;
	uint32_t *vb;
	uint32_t qw_pad_count;

	if (!skip_wait)
		via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);

	vb = via_get_dma(dev_priv);
	VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
			(VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
		((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);

	cmd_addr = (addr) ? addr :
		agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
	addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
		   (cmd_addr & HC_HAGPBpL_MASK));
	addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));

	vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
	VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
	return vb;
}

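/*
 * Program the command regulator with the ring buffer's start, end and
 * initial pause addresses, kick it off, and measure dma_diff: the offset
 * between the pause address written into the ring and the address the
 * hardware actually reports when paused.
 */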
static void via_cmdbuf_start(drm_via_private_t *dev_priv)
{
	uint32_t pause_addr_lo, pause_addr_hi;
	uint32_t start_addr, start_addr_lo;
	uint32_t end_addr, end_addr_lo;
	uint32_t command;
	uint32_t agp_base;
	uint32_t ptr;
	uint32_t reader;
	int count;

	dev_priv->dma_low = 0;

	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	start_addr = agp_base;
	end_addr = agp_base + dev_priv->dma_high;

	start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
	end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
	command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
		   ((end_addr & 0xff000000) >> 16));

	dev_priv->last_pause_ptr =
		via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
			      &pause_addr_hi, &pause_addr_lo, 1) - 1;

	via_flush_write_combine();
	(void) *(volatile uint32_t *)dev_priv->last_pause_ptr;

	via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
	via_write(dev_priv, VIA_REG_TRANSPACE, command);
	via_write(dev_priv, VIA_REG_TRANSPACE, start_addr_lo);
	via_write(dev_priv, VIA_REG_TRANSPACE, end_addr_lo);

	via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
	via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
	wmb();
	via_write(dev_priv, VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
	via_read(dev_priv, VIA_REG_TRANSPACE);

	dev_priv->dma_diff = 0;

	count = 10000000;
	while (!(via_read(dev_priv, 0x41c) & 0x80000000) && count--)
		;

	reader = *(dev_priv->hw_addr_ptr);
	ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
		dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

	/*
	 * This is the difference between where we tell the command reader
	 * to pause and where it actually pauses. It differs between
	 * hardware implementations, so it has to be detected at runtime.
	 */
	dev_priv->dma_diff = ptr - reader;
}

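/*
 * Emit a dummy command header followed by 'qwords' dummy qwords to pad
 * out small submissions.
 */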
static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
{
	uint32_t *vb;

	via_cmdbuf_wait(dev_priv, qwords + 2);
	vb = via_get_dma(dev_priv);
	VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
	via_align_buffer(dev_priv, vb, qwords);
}

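/*
 * Emit a dummy 2D blit (ROP 0xAA, destination unchanged) through the AGP
 * ring; used as filler around ring-buffer jumps.
 */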
static inline void via_dummy_bitblt(drm_via_private_t *dev_priv)
{
	uint32_t *vb = via_get_dma(dev_priv);
	SetReg2DAGP(0x0C, (0 | (0 << 16)));
	SetReg2DAGP(0x10, 0 | (0 << 16));
	SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
}

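/*
 * Wrap the ring buffer: emit a jump back to the start of the buffer,
 * insert dummy blits and pause commands, then hook the jump and the new
 * pause address into the command regulator.
 */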
static void via_cmdbuf_jump(drm_via_private_t *dev_priv)
{
	uint32_t pause_addr_lo, pause_addr_hi;
	uint32_t jump_addr_lo, jump_addr_hi;
	volatile uint32_t *last_pause_ptr;
	uint32_t dma_low_save1, dma_low_save2;

	via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
		      &jump_addr_lo, 0);

	dev_priv->dma_wrap = dev_priv->dma_low;

	/*
	 * Wrap the command buffer back to the beginning.
	 */
	dev_priv->dma_low = 0;
	if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0)
		DRM_ERROR("via_cmdbuf_jump failed\n");

	via_dummy_bitblt(dev_priv);
	via_dummy_bitblt(dev_priv);

	last_pause_ptr =
		via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
			      &pause_addr_lo, 0) - 1;
	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
		      &pause_addr_lo, 0);

	*last_pause_ptr = pause_addr_lo;
	dma_low_save1 = dev_priv->dma_low;

	/*
	 * Now set a trap that will pause the regulator if it tries to rerun
	 * the old command buffer. (That may happen if via_hook_segment()
	 * detects a command regulator pause and reissues the jump command
	 * over PCI while the regulator has already taken the jump and
	 * actually paused at the current pause address.)
	 */
	last_pause_ptr =
		via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
			      &pause_addr_lo, 0) - 1;
	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
		      &pause_addr_lo, 0);
	*last_pause_ptr = pause_addr_lo;

	dma_low_save2 = dev_priv->dma_low;
	dev_priv->dma_low = dma_low_save1;
	via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
	dev_priv->dma_low = dma_low_save2;
	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}

static void via_cmdbuf_rewind(drm_via_private_t *dev_priv)
{
	via_cmdbuf_jump(dev_priv);
}

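/*
 * Terminate the current ring-buffer segment with the given branch command
 * (pause or stop) and hook it into the command regulator.
 */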
static void via_cmdbuf_flush(drm_via_private_t *dev_priv, uint32_t cmd_type)
{
	uint32_t pause_addr_lo, pause_addr_hi;

	via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}

static void via_cmdbuf_pause(drm_via_private_t *dev_priv)
{
	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}

static void via_cmdbuf_reset(drm_via_private_t *dev_priv)
{
	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
	via_wait_idle(dev_priv);
}

/*
 * User interface to the space and lag functions.
 */
static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_cmdbuf_size_t *d_siz = data;
	int ret = 0;
	uint32_t tmp_size, count;
	drm_via_private_t *dev_priv;

	DRM_DEBUG("\n");
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	dev_priv = (drm_via_private_t *) dev->dev_private;

	if (dev_priv->ring.virtual_start == NULL) {
		DRM_ERROR("called without initializing AGP ring buffer.\n");
		return -EFAULT;
	}

	count = 1000000;
	tmp_size = d_siz->size;
	switch (d_siz->func) {
	case VIA_CMDBUF_SPACE:
		while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
		       && --count) {
			if (!d_siz->wait)
				break;
		}
		if (!count) {
			DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
			ret = -EAGAIN;
		}
		break;
	case VIA_CMDBUF_LAG:
		while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
		       && --count) {
			if (!d_siz->wait)
				break;
		}
		if (!count) {
			DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
			ret = -EAGAIN;
		}
		break;
	default:
		ret = -EFAULT;
	}
	d_siz->size = tmp_size;

	return ret;
}

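/*
 * Ioctl table exported to the DRM core; via_max_ioctl gives its size.
 */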
const struct drm_ioctl_desc via_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
};

int via_max_ioctl = ARRAY_SIZE(via_ioctls);