0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033 #include <linux/delay.h>
0034 #include <linux/mman.h>
0035 #include <linux/pci.h>
0036
0037 #include <drm/drm_device.h>
0038 #include <drm/drm_drv.h>
0039 #include <drm/drm_file.h>
0040 #include <drm/drm_ioctl.h>
0041 #include <drm/drm_print.h>
0042 #include <drm/i810_drm.h>
0043
0044 #include "i810_drv.h"
0045
0046 #define I810_BUF_FREE 2
0047 #define I810_BUF_CLIENT 1
0048 #define I810_BUF_HARDWARE 0
0049
0050 #define I810_BUF_UNMAPPED 0
0051 #define I810_BUF_MAPPED 1
0052
0053 static struct drm_buf *i810_freelist_get(struct drm_device * dev)
0054 {
0055 struct drm_device_dma *dma = dev->dma;
0056 int i;
0057 int used;
0058
0059
0060
0061 for (i = 0; i < dma->buf_count; i++) {
0062 struct drm_buf *buf = dma->buflist[i];
0063 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
0064
0065 used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
0066 I810_BUF_CLIENT);
0067 if (used == I810_BUF_FREE)
0068 return buf;
0069 }
0070 return NULL;
0071 }
0072
0073
0074
0075
0076
0077 static int i810_freelist_put(struct drm_device *dev, struct drm_buf *buf)
0078 {
0079 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
0080 int used;
0081
0082
0083 used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
0084 if (used != I810_BUF_CLIENT) {
0085 DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
0086 return -EINVAL;
0087 }
0088
0089 return 0;
0090 }
0091
/*
 * fops->mmap handler installed temporarily by i810_map_buffer().
 *
 * Maps the DMA buffer stashed in dev_priv->mmap_buffer into the
 * caller's address space; called back from vm_mmap() while the file's
 * f_op is swapped to i810_buffer_fops.
 */
static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	drm_i810_private_t *dev_priv;
	struct drm_buf *buf;
	drm_i810_buf_priv_t *buf_priv;

	dev = priv->minor->dev;
	dev_priv = dev->dev_private;
	/* Buffer selected by i810_map_buffer() just before the vm_mmap()
	 * call that lands here. */
	buf = dev_priv->mmap_buffer;
	buf_priv = buf->dev_private;

	/* The mapping must not be inherited across fork(). */
	vma->vm_flags |= VM_DONTCOPY;

	buf_priv->currently_mapped = I810_BUF_MAPPED;

	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
0115
/*
 * File operations used only while a DMA buffer is being mmap()ed via
 * i810_map_buffer(): identical to the regular DRM fops except for the
 * custom mmap handler.
 */
static const struct file_operations i810_buffer_fops = {
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = i810_mmap_buffers,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};
0124
/*
 * Map @buf into the calling process' address space.
 *
 * Temporarily swaps the file's f_op to i810_buffer_fops and publishes
 * the buffer through dev_priv->mmap_buffer so that the vm_mmap() call
 * below is routed to i810_mmap_buffers().  Returns 0 on success or a
 * negative errno.
 */
static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_private_t *dev_priv = dev->dev_private;
	const struct file_operations *old_fops;
	int retcode = 0;

	if (buf_priv->currently_mapped == I810_BUF_MAPPED)
		return -EINVAL;

	/* Redirect mmap on this file to i810_mmap_buffers() only for
	 * the duration of the vm_mmap() call. */
	old_fops = file_priv->filp->f_op;
	file_priv->filp->f_op = &i810_buffer_fops;
	dev_priv->mmap_buffer = buf;
	buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
					    PROT_READ | PROT_WRITE,
					    MAP_SHARED, buf->bus_address);
	dev_priv->mmap_buffer = NULL;
	file_priv->filp->f_op = old_fops;
	if (IS_ERR(buf_priv->virtual)) {
		/* vm_mmap() failed; report and clear the stale pointer. */
		DRM_ERROR("mmap error\n");
		retcode = PTR_ERR(buf_priv->virtual);
		buf_priv->virtual = NULL;
	}

	return retcode;
}
0154
0155 static int i810_unmap_buffer(struct drm_buf *buf)
0156 {
0157 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
0158 int retcode = 0;
0159
0160 if (buf_priv->currently_mapped != I810_BUF_MAPPED)
0161 return -EINVAL;
0162
0163 retcode = vm_munmap((unsigned long)buf_priv->virtual,
0164 (size_t) buf->total);
0165
0166 buf_priv->currently_mapped = I810_BUF_UNMAPPED;
0167 buf_priv->virtual = NULL;
0168
0169 return retcode;
0170 }
0171
0172 static int i810_dma_get_buffer(struct drm_device *dev, drm_i810_dma_t *d,
0173 struct drm_file *file_priv)
0174 {
0175 struct drm_buf *buf;
0176 drm_i810_buf_priv_t *buf_priv;
0177 int retcode = 0;
0178
0179 buf = i810_freelist_get(dev);
0180 if (!buf) {
0181 retcode = -ENOMEM;
0182 DRM_DEBUG("retcode=%d\n", retcode);
0183 return retcode;
0184 }
0185
0186 retcode = i810_map_buffer(buf, file_priv);
0187 if (retcode) {
0188 i810_freelist_put(dev, buf);
0189 DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
0190 return retcode;
0191 }
0192 buf->file_priv = file_priv;
0193 buf_priv = buf->dev_private;
0194 d->granted = 1;
0195 d->request_idx = buf->idx;
0196 d->request_size = buf->total;
0197 d->virtual = buf_priv->virtual;
0198
0199 return retcode;
0200 }
0201
/*
 * Tear down everything i810_dma_initialize() set up: irq, ring
 * mapping, hardware status page, private state and the per-buffer
 * kernel mappings.  Safe to call from error paths and at lastclose.
 */
static int i810_dma_cleanup(struct drm_device *dev)
{
	struct drm_device_dma *dma = dev->dma;

	/* Stop interrupt delivery before freeing state the handler
	 * might touch. */
	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
		drm_legacy_irq_uninstall(dev);

	if (dev->dev_private) {
		int i;
		drm_i810_private_t *dev_priv =
		    (drm_i810_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start)
			drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
		if (dev_priv->hw_status_page) {
			dma_free_coherent(dev->dev, PAGE_SIZE,
					  dev_priv->hw_status_page,
					  dev_priv->dma_status_page);
		}
		kfree(dev->dev_private);
		dev->dev_private = NULL;

		/* Drop the kernel-side ioremap of every DMA buffer made
		 * in i810_freelist_init(). */
		for (i = 0; i < dma->buf_count; i++) {
			struct drm_buf *buf = dma->buflist[i];
			drm_i810_buf_priv_t *buf_priv = buf->dev_private;

			if (buf_priv->kernel_virtual && buf->total)
				drm_legacy_ioremapfree(&buf_priv->map, dev);
		}
	}
	return 0;
}
0238
/*
 * Busy-wait until at least @n bytes of space are free in the ring
 * buffer.  Gives up with a "lockup" error if the hardware head pointer
 * makes no progress for three seconds.  Returns the number of polling
 * iterations performed.
 */
static int i810_wait_ring(struct drm_device *dev, int n)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
	int iters = 0;
	unsigned long end;
	unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;

	end = jiffies + (HZ * 3);
	while (ring->space < n) {
		ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		/* Head may have wrapped around past the tail. */
		if (ring->space < 0)
			ring->space += ring->Size;

		/* Any head movement resets the 3-second watchdog. */
		if (ring->head != last_head) {
			end = jiffies + (HZ * 3);
			last_head = ring->head;
		}

		iters++;
		if (time_before(end, jiffies)) {
			DRM_ERROR("space: %d wanted %d\n", ring->space, n);
			DRM_ERROR("lockup\n");
			goto out_wait_ring;
		}
		udelay(1);
	}

out_wait_ring:
	return iters;
}
0271
/*
 * Re-read the hardware ring head/tail registers and recompute the free
 * space.  Called before emitting commands in case another context has
 * advanced the ring behind our back.
 */
static void i810_kernel_lost_context(struct drm_device *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I810_READ(LP_RING + RING_TAIL);
	ring->space = ring->head - (ring->tail + 8);
	/* Head may have wrapped around past the tail. */
	if (ring->space < 0)
		ring->space += ring->Size;
}
0283
/*
 * Build the buffer freelist.  Each buffer gets a one-dword "in use"
 * slot inside the hardware status page (starting at byte offset 24,
 * 4 bytes apart) that both CPU and GPU update, plus a kernel-side
 * ioremap of the buffer itself for CPU access.
 */
static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int my_idx = 24;
	u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
	int i;

	if (dma->buf_count > 1019) {
		/* 24 + 1019*4 > PAGE_SIZE: the freelist slots would not
		 * fit in the status page. */
		return -EINVAL;
	}

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		buf_priv->in_use = hw_status++;
		buf_priv->my_use_idx = my_idx;
		my_idx += 4;

		*buf_priv->in_use = I810_BUF_FREE;

		buf_priv->map.offset = buf->bus_address;
		buf_priv->map.size = buf->total;
		buf_priv->map.type = _DRM_AGP;
		buf_priv->map.flags = 0;
		buf_priv->map.mtrr = 0;

		drm_legacy_ioremap(&buf_priv->map, dev);
		buf_priv->kernel_virtual = buf_priv->map.handle;

	}
	return 0;
}
0318
/*
 * Do the real work of I810_INIT_DMA_1_4: locate the maps set up by the
 * X server (SAREA, MMIO, DMA buffers), ioremap the ring buffer, copy
 * the layout parameters from @init, allocate the hardware status page
 * and build the buffer freelist.  On any failure the partially
 * initialised state is torn down via i810_dma_cleanup().
 */
static int i810_dma_initialize(struct drm_device *dev,
			       drm_i810_private_t *dev_priv,
			       drm_i810_init_t *init)
{
	struct drm_map_list *r_list;
	memset(dev_priv, 0, sizeof(drm_i810_private_t));

	/* The SAREA is the shared-memory map carrying the lock. */
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->map->type == _DRM_SHM &&
		    r_list->map->flags & _DRM_CONTAINS_LOCK) {
			dev_priv->sarea_map = r_list->map;
			break;
		}
	}
	if (!dev_priv->sarea_map) {
		/* Publish dev_priv so i810_dma_cleanup() frees it. */
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}
	dev_priv->mmio_map = drm_legacy_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}
	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset);
	if (!dev->agp_buffer_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find dma buffer map!\n");
		return -EINVAL;
	}

	/* Driver-private area lives inside the SAREA at a client-given
	 * offset. */
	dev_priv->sarea_priv = (drm_i810_sarea_t *)
	    ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;

	dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = _DRM_AGP;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_legacy_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* NOTE(review): tail_mask assumes ring Size is a power of two;
	 * not validated here. */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->w = init->w;
	dev_priv->h = init->h;
	dev_priv->pitch = init->pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->front_offset = init->front_offset;

	dev_priv->overlay_offset = init->overlay_offset;
	dev_priv->overlay_physical = init->overlay_physical;

	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
	dev_priv->zi1 = init->depth_offset | init->pitch_bits;

	/* Allocate the DMA-coherent hardware status page. */
	dev_priv->hw_status_page =
	    dma_alloc_coherent(dev->dev, PAGE_SIZE,
			       &dev_priv->dma_status_page, GFP_KERNEL);
	if (!dev_priv->hw_status_page) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	/* Tell the hardware where the status page lives. */
	I810_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	/* Finally build the buffer freelist inside the status page. */
	if (i810_freelist_init(dev, dev_priv) != 0) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("Not enough space in the status page for"
			  " the freelist\n");
		return -ENOMEM;
	}
	dev->dev_private = (void *)dev_priv;

	return 0;
}
0424
0425 static int i810_dma_init(struct drm_device *dev, void *data,
0426 struct drm_file *file_priv)
0427 {
0428 drm_i810_private_t *dev_priv;
0429 drm_i810_init_t *init = data;
0430 int retcode = 0;
0431
0432 switch (init->func) {
0433 case I810_INIT_DMA_1_4:
0434 DRM_INFO("Using v1.4 init.\n");
0435 dev_priv = kmalloc(sizeof(drm_i810_private_t), GFP_KERNEL);
0436 if (dev_priv == NULL)
0437 return -ENOMEM;
0438 retcode = i810_dma_initialize(dev, dev_priv, init);
0439 break;
0440
0441 case I810_CLEANUP_DMA:
0442 DRM_INFO("DMA Cleanup\n");
0443 retcode = i810_dma_cleanup(dev);
0444 break;
0445 default:
0446 return -EINVAL;
0447 }
0448
0449 return retcode;
0450 }
0451
0452
0453
0454
0455
0456
0457
0458 static void i810EmitContextVerified(struct drm_device *dev,
0459 volatile unsigned int *code)
0460 {
0461 drm_i810_private_t *dev_priv = dev->dev_private;
0462 int i, j = 0;
0463 unsigned int tmp;
0464 RING_LOCALS;
0465
0466 BEGIN_LP_RING(I810_CTX_SETUP_SIZE);
0467
0468 OUT_RING(GFX_OP_COLOR_FACTOR);
0469 OUT_RING(code[I810_CTXREG_CF1]);
0470
0471 OUT_RING(GFX_OP_STIPPLE);
0472 OUT_RING(code[I810_CTXREG_ST1]);
0473
0474 for (i = 4; i < I810_CTX_SETUP_SIZE; i++) {
0475 tmp = code[i];
0476
0477 if ((tmp & (7 << 29)) == (3 << 29) &&
0478 (tmp & (0x1f << 24)) < (0x1d << 24)) {
0479 OUT_RING(tmp);
0480 j++;
0481 } else
0482 printk("constext state dropped!!!\n");
0483 }
0484
0485 if (j & 1)
0486 OUT_RING(0);
0487
0488 ADVANCE_LP_RING();
0489 }
0490
0491 static void i810EmitTexVerified(struct drm_device *dev, volatile unsigned int *code)
0492 {
0493 drm_i810_private_t *dev_priv = dev->dev_private;
0494 int i, j = 0;
0495 unsigned int tmp;
0496 RING_LOCALS;
0497
0498 BEGIN_LP_RING(I810_TEX_SETUP_SIZE);
0499
0500 OUT_RING(GFX_OP_MAP_INFO);
0501 OUT_RING(code[I810_TEXREG_MI1]);
0502 OUT_RING(code[I810_TEXREG_MI2]);
0503 OUT_RING(code[I810_TEXREG_MI3]);
0504
0505 for (i = 4; i < I810_TEX_SETUP_SIZE; i++) {
0506 tmp = code[i];
0507
0508 if ((tmp & (7 << 29)) == (3 << 29) &&
0509 (tmp & (0x1f << 24)) < (0x1d << 24)) {
0510 OUT_RING(tmp);
0511 j++;
0512 } else
0513 printk("texture state dropped!!!\n");
0514 }
0515
0516 if (j & 1)
0517 OUT_RING(0);
0518
0519 ADVANCE_LP_RING();
0520 }
0521
0522
0523
/*
 * Emit destination-buffer state to the ring.  The client-supplied DI1
 * dword is accepted only if it matches the front or back buffer the
 * server configured; the Z-buffer info always comes from trusted
 * dev_priv state.
 */
static void i810EmitDestVerified(struct drm_device *dev,
				 volatile unsigned int *code)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);

	tmp = code[I810_DESTREG_DI1];
	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
		OUT_RING(CMD_OP_DESTBUFFER_INFO);
		OUT_RING(tmp);
	} else
		DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
			  tmp, dev_priv->front_di1, dev_priv->back_di1);

	/* Invariant state below: never taken from the client. */

	OUT_RING(CMD_OP_Z_BUFFER_INFO);
	OUT_RING(dev_priv->zi1);

	OUT_RING(GFX_OP_DESTBUFFER_VARS);
	OUT_RING(code[I810_DESTREG_DV1]);

	OUT_RING(GFX_OP_DRAWRECT_INFO);
	OUT_RING(code[I810_DESTREG_DR1]);
	OUT_RING(code[I810_DESTREG_DR2]);
	OUT_RING(code[I810_DESTREG_DR3]);
	OUT_RING(code[I810_DESTREG_DR4]);
	OUT_RING(0);

	ADVANCE_LP_RING();
}
0558
/*
 * Flush dirty client state from the SAREA to the hardware.  Each dirty
 * bit is cleared once the corresponding state block has been emitted.
 */
static void i810EmitState(struct drm_device *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	/* Snapshot the dirty mask once: the SAREA is shared with
	 * userspace and may change underneath us. */
	unsigned int dirty = sarea_priv->dirty;

	DRM_DEBUG("%x\n", dirty);

	if (dirty & I810_UPLOAD_BUFFERS) {
		i810EmitDestVerified(dev, sarea_priv->BufferState);
		sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
	}

	if (dirty & I810_UPLOAD_CTX) {
		i810EmitContextVerified(dev, sarea_priv->ContextState);
		sarea_priv->dirty &= ~I810_UPLOAD_CTX;
	}

	if (dirty & I810_UPLOAD_TEX0) {
		i810EmitTexVerified(dev, sarea_priv->TexState[0]);
		sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
	}

	if (dirty & I810_UPLOAD_TEX1) {
		i810EmitTexVerified(dev, sarea_priv->TexState[1]);
		sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
	}
}
0587
0588
0589
/*
 * Clear the requested buffers (front/back/depth) with a solid-colour
 * BLT, one per SAREA cliprect.  When page flipping has swapped the
 * buffers, the FRONT and BACK flags are swapped to match.
 */
static void i810_dma_dispatch_clear(struct drm_device *dev, int flags,
				    unsigned int clear_color,
				    unsigned int clear_zval)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = 2;	/* bytes per pixel */
	int i;
	RING_LOCALS;

	/* While flipped to page 1, "front" and "back" trade places. */
	if (dev_priv->current_page == 1) {
		unsigned int tmp = flags;

		flags &= ~(I810_FRONT | I810_BACK);
		if (tmp & I810_FRONT)
			flags |= I810_BACK;
		if (tmp & I810_BACK)
			flags |= I810_FRONT;
	}

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	for (i = 0; i < nbox; i++, pbox++) {
		unsigned int x = pbox->x1;
		unsigned int y = pbox->y1;
		unsigned int width = (pbox->x2 - x) * cpp;
		unsigned int height = pbox->y2 - y;
		unsigned int start = y * pitch + x * cpp;

		/* Skip malformed or out-of-bounds cliprects coming from
		 * the shared SAREA. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
			continue;

		if (flags & I810_FRONT) {
			BEGIN_LP_RING(6);
			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
			OUT_RING((height << 16) | width);
			OUT_RING(start);
			OUT_RING(clear_color);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}

		if (flags & I810_BACK) {
			BEGIN_LP_RING(6);
			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
			OUT_RING((height << 16) | width);
			OUT_RING(dev_priv->back_offset + start);
			OUT_RING(clear_color);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}

		if (flags & I810_DEPTH) {
			BEGIN_LP_RING(6);
			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
			OUT_RING((height << 16) | width);
			OUT_RING(dev_priv->depth_offset + start);
			OUT_RING(clear_zval);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}
}
0664
/*
 * Blit the freshly rendered buffer to the displayed buffer for every
 * SAREA cliprect.  Which offset is source and which is destination
 * depends on the current page-flip state.
 */
static void i810_dma_dispatch_swap(struct drm_device *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = 2;	/* bytes per pixel */
	int i;
	RING_LOCALS;

	DRM_DEBUG("swapbuffers\n");

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	for (i = 0; i < nbox; i++, pbox++) {
		unsigned int w = pbox->x2 - pbox->x1;
		unsigned int h = pbox->y2 - pbox->y1;
		unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch;
		unsigned int start = dst;

		/* Skip malformed or out-of-bounds cliprects. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
			continue;

		BEGIN_LP_RING(6);
		OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4);
		OUT_RING(pitch | (0xCC << 16));	/* 0xCC = source-copy ROP */
		OUT_RING((h << 16) | (w * cpp));
		/* Destination: the buffer currently being displayed. */
		if (dev_priv->current_page == 0)
			OUT_RING(dev_priv->front_offset + start);
		else
			OUT_RING(dev_priv->back_offset + start);
		OUT_RING(pitch);
		/* Source: the buffer that was just rendered to. */
		if (dev_priv->current_page == 0)
			OUT_RING(dev_priv->back_offset + start);
		else
			OUT_RING(dev_priv->front_offset + start);
		ADVANCE_LP_RING();
	}
}
0710
/*
 * Dispatch one client vertex buffer as a protected batch buffer,
 * re-run once per SAREA cliprect with the scissor set accordingly.
 * If @discard is set, ownership passes to the hardware and the GPU
 * marks the buffer FREE via the status page when it finishes.
 */
static void i810_dma_dispatch_vertex(struct drm_device *dev,
				     struct drm_buf *buf, int discard, int used)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	struct drm_clip_rect *box = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;
	int i = 0;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	/* Clamp the client-supplied length to the 4k batch limit. */
	if (used < 0 || used > 4 * 1024)
		used = 0;

	if (sarea_priv->dirty)
		i810EmitState(dev);

	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);

		/* Patch the primitive command into the first dword of
		 * the client-written buffer. */
		*(u32 *) buf_priv->kernel_virtual =
		    ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));

		/* Pad to a qword boundary. */
		if (used & 4) {
			*(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
			used += 4;
		}

		i810_unmap_buffer(buf);
	}

	if (used) {
		do {
			if (i < nbox) {
				BEGIN_LP_RING(4);
				OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
					 SC_ENABLE);
				OUT_RING(GFX_OP_SCISSOR_INFO);
				OUT_RING(box[i].x1 | (box[i].y1 << 16));
				OUT_RING((box[i].x2 -
					  1) | ((box[i].y2 - 1) << 16));
				ADVANCE_LP_RING();
			}

			BEGIN_LP_RING(4);
			OUT_RING(CMD_OP_BATCH_BUFFER);
			OUT_RING(start | BB1_PROTECTED);
			OUT_RING(start + used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();

		} while (++i < nbox);
	}

	if (discard) {
		dev_priv->counter++;

		/* Hand ownership to the hardware (best-effort). */
		(void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
			      I810_BUF_HARDWARE);

		/* Have the GPU store the new counter at status-page
		 * dword 20 and mark the buffer FREE once it is done. */
		BEGIN_LP_RING(8);
		OUT_RING(CMD_STORE_DWORD_IDX);
		OUT_RING(20);
		OUT_RING(dev_priv->counter);
		OUT_RING(CMD_STORE_DWORD_IDX);
		OUT_RING(buf_priv->my_use_idx);
		OUT_RING(I810_BUF_FREE);
		OUT_RING(CMD_REPORT_HEAD);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
0790
/*
 * Emit a page flip: flush caches, point the display at the other
 * buffer, wait in the ring for the flip event, then mirror the new
 * state into the SAREA for userspace.
 */
static void i810_dma_dispatch_flip(struct drm_device *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	int pitch = dev_priv->pitch;
	RING_LOCALS;

	DRM_DEBUG("page=%d pfCurrentPage=%d\n",
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i810_kernel_lost_context(dev);

	/* Flush before switching the scanout address. */
	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
	/* NOTE(review): the (pitch << 5) encoding for FRONTBUFFER_INFO
	 * is taken on faith from the original code — confirm against
	 * the i810 programming docs. */
	OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) );
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Stall subsequent commands until the flip has occurred. */
	BEGIN_LP_RING(2);
	OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Let userspace know which page is now frontmost. */
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

}
0836
/*
 * Flush hardware caches and busy-wait until the ring has drained,
 * leaving the engine idle.
 */
static void i810_dma_quiescent(struct drm_device *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(CMD_REPORT_HEAD);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Wait until nearly the whole ring is free again. */
	i810_wait_ring(dev, dev_priv->ring.Size - 8);
}
0853
0854 static void i810_flush_queue(struct drm_device *dev)
0855 {
0856 drm_i810_private_t *dev_priv = dev->dev_private;
0857 struct drm_device_dma *dma = dev->dma;
0858 int i;
0859 RING_LOCALS;
0860
0861 i810_kernel_lost_context(dev);
0862
0863 BEGIN_LP_RING(2);
0864 OUT_RING(CMD_REPORT_HEAD);
0865 OUT_RING(0);
0866 ADVANCE_LP_RING();
0867
0868 i810_wait_ring(dev, dev_priv->ring.Size - 8);
0869
0870 for (i = 0; i < dma->buf_count; i++) {
0871 struct drm_buf *buf = dma->buflist[i];
0872 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
0873
0874 int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
0875 I810_BUF_FREE);
0876
0877 if (used == I810_BUF_HARDWARE)
0878 DRM_DEBUG("reclaimed from HARDWARE\n");
0879 if (used == I810_BUF_CLIENT)
0880 DRM_DEBUG("still on client\n");
0881 }
0882
0883 return;
0884 }
0885
0886
0887 void i810_driver_reclaim_buffers(struct drm_device *dev,
0888 struct drm_file *file_priv)
0889 {
0890 struct drm_device_dma *dma = dev->dma;
0891 int i;
0892
0893 if (!dma)
0894 return;
0895 if (!dev->dev_private)
0896 return;
0897 if (!dma->buflist)
0898 return;
0899
0900 i810_flush_queue(dev);
0901
0902 for (i = 0; i < dma->buf_count; i++) {
0903 struct drm_buf *buf = dma->buflist[i];
0904 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
0905
0906 if (buf->file_priv == file_priv && buf_priv) {
0907 int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
0908 I810_BUF_FREE);
0909
0910 if (used == I810_BUF_CLIENT)
0911 DRM_DEBUG("reclaimed from client\n");
0912 if (buf_priv->currently_mapped == I810_BUF_MAPPED)
0913 buf_priv->currently_mapped = I810_BUF_UNMAPPED;
0914 }
0915 }
0916 }
0917
/* DRM_IOCTL_I810_FLUSH: drain the ring and reclaim completed
 * buffers. */
static int i810_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	i810_flush_queue(dev);
	return 0;
}
0926
0927 static int i810_dma_vertex(struct drm_device *dev, void *data,
0928 struct drm_file *file_priv)
0929 {
0930 struct drm_device_dma *dma = dev->dma;
0931 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
0932 u32 *hw_status = dev_priv->hw_status_page;
0933 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
0934 dev_priv->sarea_priv;
0935 drm_i810_vertex_t *vertex = data;
0936
0937 LOCK_TEST_WITH_RETURN(dev, file_priv);
0938
0939 DRM_DEBUG("idx %d used %d discard %d\n",
0940 vertex->idx, vertex->used, vertex->discard);
0941
0942 if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
0943 return -EINVAL;
0944
0945 i810_dma_dispatch_vertex(dev,
0946 dma->buflist[vertex->idx],
0947 vertex->discard, vertex->used);
0948
0949 sarea_priv->last_enqueue = dev_priv->counter - 1;
0950 sarea_priv->last_dispatch = (int)hw_status[5];
0951
0952 return 0;
0953 }
0954
0955 static int i810_clear_bufs(struct drm_device *dev, void *data,
0956 struct drm_file *file_priv)
0957 {
0958 drm_i810_clear_t *clear = data;
0959
0960 LOCK_TEST_WITH_RETURN(dev, file_priv);
0961
0962
0963 if (!dev->dev_private)
0964 return -EINVAL;
0965
0966 i810_dma_dispatch_clear(dev, clear->flags,
0967 clear->clear_color, clear->clear_depth);
0968 return 0;
0969 }
0970
/* DRM_IOCTL_I810_SWAP: copy the rendered buffer to the displayed
 * buffer for every SAREA cliprect. */
static int i810_swap_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	i810_dma_dispatch_swap(dev);
	return 0;
}
0981
0982 static int i810_getage(struct drm_device *dev, void *data,
0983 struct drm_file *file_priv)
0984 {
0985 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
0986 u32 *hw_status = dev_priv->hw_status_page;
0987 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
0988 dev_priv->sarea_priv;
0989
0990 sarea_priv->last_dispatch = (int)hw_status[5];
0991 return 0;
0992 }
0993
0994 static int i810_getbuf(struct drm_device *dev, void *data,
0995 struct drm_file *file_priv)
0996 {
0997 int retcode = 0;
0998 drm_i810_dma_t *d = data;
0999 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1000 u32 *hw_status = dev_priv->hw_status_page;
1001 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1002 dev_priv->sarea_priv;
1003
1004 LOCK_TEST_WITH_RETURN(dev, file_priv);
1005
1006 d->granted = 0;
1007
1008 retcode = i810_dma_get_buffer(dev, d, file_priv);
1009
1010 DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
1011 task_pid_nr(current), retcode, d->granted);
1012
1013 sarea_priv->last_dispatch = (int)hw_status[5];
1014
1015 return retcode;
1016 }
1017
/* DRM_IOCTL_I810_COPY: does nothing — NOTE(review): presumably kept
 * only so the ioctl number stays valid for old userspace. */
static int i810_copybuf(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	/* intentionally a no-op */
	return 0;
}
1024
/* DRM_IOCTL_I810_DOCOPY: does nothing — NOTE(review): presumably kept
 * only so the ioctl number stays valid for old userspace. */
static int i810_docopy(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	/* intentionally a no-op */
	return 0;
}
1031
/*
 * Dispatch a client buffer (used by the MC ioctl) as a protected batch
 * buffer, then have the GPU mark it FREE and store @last_render into
 * the status page when the batch completes.
 */
static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, int used,
				 unsigned int last_render)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;
	int u;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	/* Take ownership CLIENT -> HARDWARE; complain if the buffer was
	 * not actually owned by a client. */
	u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
	if (u != I810_BUF_CLIENT)
		DRM_DEBUG("MC found buffer that isn't mine!\n");

	/* Clamp the client-supplied length to the 4k batch limit. */
	if (used < 0 || used > 4 * 1024)
		used = 0;

	/* Force a full state re-emit on the next dispatch. */
	sarea_priv->dirty = 0x7f;

	DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);

	dev_priv->counter++;
	DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
	DRM_DEBUG("start : %lx\n", start);
	DRM_DEBUG("used : %d\n", used);
	DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);

	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		/* Pad to a qword boundary. */
		if (used & 4) {
			*(u32 *) ((char *) buf_priv->virtual + used) = 0;
			used += 4;
		}

		i810_unmap_buffer(buf);
	}
	BEGIN_LP_RING(4);
	OUT_RING(CMD_OP_BATCH_BUFFER);
	OUT_RING(start | BB1_PROTECTED);
	OUT_RING(start + used - 4);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* After the batch completes: mark the buffer FREE and record
	 * @last_render at dword 16 of the status page. */
	BEGIN_LP_RING(8);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(buf_priv->my_use_idx);
	OUT_RING(I810_BUF_FREE);
	OUT_RING(0);

	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(16);
	OUT_RING(last_render);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
1089
1090 static int i810_dma_mc(struct drm_device *dev, void *data,
1091 struct drm_file *file_priv)
1092 {
1093 struct drm_device_dma *dma = dev->dma;
1094 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1095 u32 *hw_status = dev_priv->hw_status_page;
1096 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1097 dev_priv->sarea_priv;
1098 drm_i810_mc_t *mc = data;
1099
1100 LOCK_TEST_WITH_RETURN(dev, file_priv);
1101
1102 if (mc->idx >= dma->buf_count || mc->idx < 0)
1103 return -EINVAL;
1104
1105 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
1106 mc->last_render);
1107
1108 sarea_priv->last_enqueue = dev_priv->counter - 1;
1109 sarea_priv->last_dispatch = (int)hw_status[5];
1110
1111 return 0;
1112 }
1113
1114 static int i810_rstatus(struct drm_device *dev, void *data,
1115 struct drm_file *file_priv)
1116 {
1117 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1118
1119 return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
1120 }
1121
1122 static int i810_ov0_info(struct drm_device *dev, void *data,
1123 struct drm_file *file_priv)
1124 {
1125 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1126 drm_i810_overlay_t *ov = data;
1127
1128 ov->offset = dev_priv->overlay_offset;
1129 ov->physical = dev_priv->overlay_physical;
1130
1131 return 0;
1132 }
1133
/* DRM_IOCTL_I810_FSTATUS: return the raw contents of hardware
 * register 0x30008 to userspace (register semantics not documented
 * here — TODO confirm against the i810 programming docs). */
static int i810_fstatus(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;

	LOCK_TEST_WITH_RETURN(dev, file_priv);
	return I810_READ(0x30008);
}
1142
/* DRM_IOCTL_I810_OV0FLIP: trigger an overlay update by writing the
 * overlay's physical address to register 0x30000 — NOTE(review): bit
 * 31 presumably arms the update; confirm against hardware docs. */
static int i810_ov0_flip(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);

	return 0;
}
1155
1156
1157
1158 static void i810_do_init_pageflip(struct drm_device *dev)
1159 {
1160 drm_i810_private_t *dev_priv = dev->dev_private;
1161
1162 DRM_DEBUG("\n");
1163 dev_priv->page_flipping = 1;
1164 dev_priv->current_page = 0;
1165 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
1166 }
1167
1168 static int i810_do_cleanup_pageflip(struct drm_device *dev)
1169 {
1170 drm_i810_private_t *dev_priv = dev->dev_private;
1171
1172 DRM_DEBUG("\n");
1173 if (dev_priv->current_page != 0)
1174 i810_dma_dispatch_flip(dev);
1175
1176 dev_priv->page_flipping = 0;
1177 return 0;
1178 }
1179
/* DRM_IOCTL_I810_FLIP: page-flip between front and back buffers,
 * lazily enabling page flipping on first use. */
static int i810_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i810_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv->page_flipping)
		i810_do_init_pageflip(dev);

	i810_dma_dispatch_flip(dev);
	return 0;
}
1195
1196 int i810_driver_load(struct drm_device *dev, unsigned long flags)
1197 {
1198 struct pci_dev *pdev = to_pci_dev(dev->dev);
1199
1200 dev->agp = drm_legacy_agp_init(dev);
1201 if (dev->agp) {
1202 dev->agp->agp_mtrr = arch_phys_wc_add(
1203 dev->agp->agp_info.aper_base,
1204 dev->agp->agp_info.aper_size *
1205 1024 * 1024);
1206 }
1207
1208
1209 if (!dev->agp)
1210 return -EINVAL;
1211
1212 pci_set_master(pdev);
1213
1214 return 0;
1215 }
1216
/* driver->lastclose callback: release all DMA state when the last
 * file handle on the device is closed. */
void i810_driver_lastclose(struct drm_device *dev)
{
	i810_dma_cleanup(dev);
}
1221
/*
 * driver->preclose callback: undo per-file state as a client exits.
 * Finishes any pending page flip and returns the client's DMA buffers
 * to the freelist, holding the idlelock when a master with a hardware
 * lock still exists.
 */
void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
	if (dev->dev_private) {
		drm_i810_private_t *dev_priv = dev->dev_private;
		if (dev_priv->page_flipping)
			i810_do_cleanup_pageflip(dev);
	}

	if (file_priv->master && file_priv->master->lock.hw_lock) {
		drm_legacy_idlelock_take(&file_priv->master->lock);
		i810_driver_reclaim_buffers(dev, file_priv);
		drm_legacy_idlelock_release(&file_priv->master->lock);
	} else {
		/* No master/hardware lock left to serialise against;
		 * reclaim anyway. */
		i810_driver_reclaim_buffers(dev, file_priv);
	}

}
1241
/* driver->dma_quiescent callback: wait for the engine to go idle. */
int i810_driver_dma_quiescent(struct drm_device *dev)
{
	i810_dma_quiescent(dev);
	return 0;
}
1247
/* Ioctl dispatch table.  Every entry requires authentication; INIT and
 * MC are additionally restricted to the root master. */
const struct drm_ioctl_desc i810_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
};

/* Number of entries in the table above. */
int i810_max_ioctl = ARRAY_SIZE(i810_ioctls);