Back to home page

OSCL-LXR

 
 

    


0001 /* savage_bci.c -- BCI support for Savage
0002  *
0003  * Copyright 2004  Felix Kuehling
0004  * All Rights Reserved.
0005  *
0006  * Permission is hereby granted, free of charge, to any person obtaining a
0007  * copy of this software and associated documentation files (the "Software"),
0008  * to deal in the Software without restriction, including without limitation
0009  * the rights to use, copy, modify, merge, publish, distribute, sub license,
0010  * and/or sell copies of the Software, and to permit persons to whom the
0011  * Software is furnished to do so, subject to the following conditions:
0012  *
0013  * The above copyright notice and this permission notice (including the
0014  * next paragraph) shall be included in all copies or substantial portions
0015  * of the Software.
0016  *
0017  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
0018  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
0019  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
0020  * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
0021  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
0022  * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
0023  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0024  */
0025 
0026 #include <linux/delay.h>
0027 #include <linux/pci.h>
0028 #include <linux/slab.h>
0029 #include <linux/uaccess.h>
0030 
0031 #include <drm/drm_device.h>
0032 #include <drm/drm_file.h>
0033 #include <drm/drm_print.h>
0034 #include <drm/savage_drm.h>
0035 
0036 #include "savage_drv.h"
0037 
/* Need a long timeout because shadow status updates can take a while
 * and so can waiting for events when the queue is full. */
0040 #define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000 /* 1s */
0041 #define SAVAGE_EVENT_USEC_TIMEOUT   5000000 /* 5s */
0042 #define SAVAGE_FREELIST_DEBUG       0
0043 
0044 static int savage_do_cleanup_bci(struct drm_device *dev);
0045 
0046 static int
0047 savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
0048 {
0049     uint32_t mask = dev_priv->status_used_mask;
0050     uint32_t threshold = dev_priv->bci_threshold_hi;
0051     uint32_t status;
0052     int i;
0053 
0054 #if SAVAGE_BCI_DEBUG
0055     if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
0056         DRM_ERROR("Trying to emit %d words "
0057               "(more than guaranteed space in COB)\n", n);
0058 #endif
0059 
0060     for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
0061         mb();
0062         status = dev_priv->status_ptr[0];
0063         if ((status & mask) < threshold)
0064             return 0;
0065         udelay(1);
0066     }
0067 
0068 #if SAVAGE_BCI_DEBUG
0069     DRM_ERROR("failed!\n");
0070     DRM_INFO("   status=0x%08x, threshold=0x%08x\n", status, threshold);
0071 #endif
0072     return -EBUSY;
0073 }
0074 
0075 static int
0076 savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
0077 {
0078     uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
0079     uint32_t status;
0080     int i;
0081 
0082     for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
0083         status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
0084         if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
0085             return 0;
0086         udelay(1);
0087     }
0088 
0089 #if SAVAGE_BCI_DEBUG
0090     DRM_ERROR("failed!\n");
0091     DRM_INFO("   status=0x%08x\n", status);
0092 #endif
0093     return -EBUSY;
0094 }
0095 
0096 static int
0097 savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
0098 {
0099     uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
0100     uint32_t status;
0101     int i;
0102 
0103     for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
0104         status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
0105         if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
0106             return 0;
0107         udelay(1);
0108     }
0109 
0110 #if SAVAGE_BCI_DEBUG
0111     DRM_ERROR("failed!\n");
0112     DRM_INFO("   status=0x%08x\n", status);
0113 #endif
0114     return -EBUSY;
0115 }
0116 
0117 /*
0118  * Waiting for events.
0119  *
 * The BIOS resets the event tag to 0 on mode changes. Therefore we
0121  * never emit 0 to the event tag. If we find a 0 event tag we know the
0122  * BIOS stomped on it and return success assuming that the BIOS waited
0123  * for engine idle.
0124  *
0125  * Note: if the Xserver uses the event tag it has to follow the same
0126  * rule. Otherwise there may be glitches every 2^16 events.
0127  */
/*
 * Wait until the shadow status area reports that event tag e has been
 * processed by the hardware.  Returns 0 on success, -EBUSY on timeout.
 * A tag of 0 means the BIOS reset the tag (see the comment block
 * above) and is treated as "already passed".
 */
static int
savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
{
    uint32_t status;
    int i;

    for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
        mb();   /* make sure we see the latest shadow update */
        status = dev_priv->status_ptr[1];
        /* Wrap-safe 16-bit comparison: (tag - e) mod 2^16 is within
         * half the tag range iff the hardware has reached e. */
        if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
            (status & 0xffff) == 0)
            return 0;
        udelay(1);
    }

#if SAVAGE_BCI_DEBUG
    DRM_ERROR("failed!\n");
    DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
#endif

    return -EBUSY;
}
0150 
0151 static int
0152 savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
0153 {
0154     uint32_t status;
0155     int i;
0156 
0157     for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
0158         status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
0159         if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
0160             (status & 0xffff) == 0)
0161             return 0;
0162         udelay(1);
0163     }
0164 
0165 #if SAVAGE_BCI_DEBUG
0166     DRM_ERROR("failed!\n");
0167     DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
0168 #endif
0169 
0170     return -EBUSY;
0171 }
0172 
/*
 * Emit a new event tag to the BCI, optionally preceded by a 2D/3D
 * engine wait (flags SAVAGE_WAIT_2D / SAVAGE_WAIT_3D).  Maintains the
 * 16-bit event counter and its wrap count, never emitting tag 0 (see
 * the comment above the savage_bci_wait_event_* functions).  Returns
 * the newly emitted event tag.
 */
uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
                   unsigned int flags)
{
    uint16_t count;
    BCI_LOCALS;

    if (dev_priv->status_ptr) {
        /* coordinate with Xserver */
        count = dev_priv->status_ptr[1023];
        if (count < dev_priv->event_counter)
            dev_priv->event_wrap++;
    } else {
        count = dev_priv->event_counter;
    }
    count = (count + 1) & 0xffff;
    if (count == 0) {
        count++;    /* See the comment above savage_wait_event_*. */
        dev_priv->event_wrap++;
    }
    dev_priv->event_counter = count;
    if (dev_priv->status_ptr)
        dev_priv->status_ptr[1023] = (uint32_t) count;

    /* Emit the optional engine wait followed by the tag update. */
    if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
        unsigned int wait_cmd = BCI_CMD_WAIT;
        if ((flags & SAVAGE_WAIT_2D))
            wait_cmd |= BCI_CMD_WAIT_2D;
        if ((flags & SAVAGE_WAIT_3D))
            wait_cmd |= BCI_CMD_WAIT_3D;
        BEGIN_BCI(2);
        BCI_WRITE(wait_cmd);
    } else {
        BEGIN_BCI(1);
    }
    BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count);

    return count;
}
0211 
0212 /*
0213  * Freelist management
0214  */
0215 static int savage_freelist_init(struct drm_device * dev)
0216 {
0217     drm_savage_private_t *dev_priv = dev->dev_private;
0218     struct drm_device_dma *dma = dev->dma;
0219     struct drm_buf *buf;
0220     drm_savage_buf_priv_t *entry;
0221     int i;
0222     DRM_DEBUG("count=%d\n", dma->buf_count);
0223 
0224     dev_priv->head.next = &dev_priv->tail;
0225     dev_priv->head.prev = NULL;
0226     dev_priv->head.buf = NULL;
0227 
0228     dev_priv->tail.next = NULL;
0229     dev_priv->tail.prev = &dev_priv->head;
0230     dev_priv->tail.buf = NULL;
0231 
0232     for (i = 0; i < dma->buf_count; i++) {
0233         buf = dma->buflist[i];
0234         entry = buf->dev_private;
0235 
0236         SET_AGE(&entry->age, 0, 0);
0237         entry->buf = buf;
0238 
0239         entry->next = dev_priv->head.next;
0240         entry->prev = &dev_priv->head;
0241         dev_priv->head.next->prev = entry;
0242         dev_priv->head.next = entry;
0243     }
0244 
0245     return 0;
0246 }
0247 
/*
 * Take the least recently used buffer off the tail of the freelist,
 * but only if the hardware has already passed the event that aged it
 * (or the BIOS reset the event tag to 0).  Returns NULL if no buffer
 * is currently reusable.
 */
static struct drm_buf *savage_freelist_get(struct drm_device * dev)
{
    drm_savage_private_t *dev_priv = dev->dev_private;
    drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
    uint16_t event;
    unsigned int wrap;
    DRM_DEBUG("\n");

    /* Read the hardware's current event tag and derive the wrap count
     * it corresponds to. */
    UPDATE_EVENT_COUNTER();
    if (dev_priv->status_ptr)
        event = dev_priv->status_ptr[1] & 0xffff;
    else
        event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
    wrap = dev_priv->event_wrap;
    if (event > dev_priv->event_counter)
        wrap--;     /* hardware hasn't passed the last wrap yet */

    DRM_DEBUG("   tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
    DRM_DEBUG("   head=0x%04x %d\n", event, wrap);

    /* event == 0 means the BIOS stomped on the tag; assume idle. */
    if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
        drm_savage_buf_priv_t *next = tail->next;
        drm_savage_buf_priv_t *prev = tail->prev;
        /* Unlink and NULL the links so savage_freelist_put can detect
         * a double insertion. */
        prev->next = next;
        next->prev = prev;
        tail->next = tail->prev = NULL;
        return tail->buf;
    }

    DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
    return NULL;
}
0280 
0281 void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf)
0282 {
0283     drm_savage_private_t *dev_priv = dev->dev_private;
0284     drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
0285 
0286     DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);
0287 
0288     if (entry->next != NULL || entry->prev != NULL) {
0289         DRM_ERROR("entry already on freelist.\n");
0290         return;
0291     }
0292 
0293     prev = &dev_priv->head;
0294     next = prev->next;
0295     prev->next = entry;
0296     next->prev = entry;
0297     entry->prev = prev;
0298     entry->next = next;
0299 }
0300 
0301 /*
0302  * Command DMA
0303  */
0304 static int savage_dma_init(drm_savage_private_t * dev_priv)
0305 {
0306     unsigned int i;
0307 
0308     dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
0309         (SAVAGE_DMA_PAGE_SIZE * 4);
0310     dev_priv->dma_pages = kmalloc_array(dev_priv->nr_dma_pages,
0311                         sizeof(drm_savage_dma_page_t),
0312                         GFP_KERNEL);
0313     if (dev_priv->dma_pages == NULL)
0314         return -ENOMEM;
0315 
0316     for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
0317         SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
0318         dev_priv->dma_pages[i].used = 0;
0319         dev_priv->dma_pages[i].flushed = 0;
0320     }
0321     SET_AGE(&dev_priv->last_dma_age, 0, 0);
0322 
0323     dev_priv->first_dma_page = 0;
0324     dev_priv->current_dma_page = 0;
0325 
0326     return 0;
0327 }
0328 
0329 void savage_dma_reset(drm_savage_private_t * dev_priv)
0330 {
0331     uint16_t event;
0332     unsigned int wrap, i;
0333     event = savage_bci_emit_event(dev_priv, 0);
0334     wrap = dev_priv->event_wrap;
0335     for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
0336         SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
0337         dev_priv->dma_pages[i].used = 0;
0338         dev_priv->dma_pages[i].flushed = 0;
0339     }
0340     SET_AGE(&dev_priv->last_dma_age, event, wrap);
0341     dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
0342 }
0343 
/*
 * Wait until the hardware has passed the event that aged DMA page
 * 'page', so the page can safely be overwritten.  No-op for the faked
 * DMA buffer, whose pages are consumed synchronously and never age.
 */
void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page)
{
    uint16_t event;
    unsigned int wrap;

    /* Faked DMA buffer pages don't age. */
    if (dev_priv->cmd_dma == &dev_priv->fake_dma)
        return;

    /* Read the hardware's current event tag and derive its wrap. */
    UPDATE_EVENT_COUNTER();
    if (dev_priv->status_ptr)
        event = dev_priv->status_ptr[1] & 0xffff;
    else
        event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
    wrap = dev_priv->event_wrap;
    if (event > dev_priv->event_counter)
        wrap--;     /* hardware hasn't passed the last wrap yet */

    /* Only wait if the page's age is still ahead of the hardware. */
    if (dev_priv->dma_pages[page].age.wrap > wrap ||
        (dev_priv->dma_pages[page].age.wrap == wrap &&
         dev_priv->dma_pages[page].age.event > event)) {
        if (dev_priv->wait_evnt(dev_priv,
                    dev_priv->dma_pages[page].age.event)
            < 0)
            DRM_ERROR("wait_evnt failed!\n");
    }
}
0371 
/*
 * Reserve space for n command words in the command DMA buffer,
 * flushing pending pages first if the request doesn't fit before the
 * end of the buffer.  Returns a pointer to the start of the reserved
 * (contiguous) region.
 */
uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n)
{
    unsigned int cur = dev_priv->current_dma_page;
    unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
        dev_priv->dma_pages[cur].used;
    /* whole pages needed beyond what's left in the current page */
    unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
        SAVAGE_DMA_PAGE_SIZE;
    uint32_t *dma_ptr;
    unsigned int i;

    DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
          cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);

    if (cur + nr_pages < dev_priv->nr_dma_pages) {
        /* Request fits before the end of the buffer: start in the
         * current page. */
        dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
            cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
        if (n < rest)
            rest = n;
        dev_priv->dma_pages[cur].used += rest;
        n -= rest;
        cur++;
    } else {
        /* Not enough room: flush everything pending and restart the
         * allocation from page 0. */
        dev_priv->dma_flush(dev_priv);
        nr_pages =
            (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
        for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
            dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
            dev_priv->dma_pages[i].used = 0;
            dev_priv->dma_pages[i].flushed = 0;
        }
        dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle;
        dev_priv->first_dma_page = cur = 0;
    }
    /* Mark the pages consumed by the remainder of the request. */
    for (i = cur; nr_pages > 0; ++i, --nr_pages) {
#if SAVAGE_DMA_DEBUG
        if (dev_priv->dma_pages[i].used) {
            DRM_ERROR("unflushed page %u: used=%u\n",
                  i, dev_priv->dma_pages[i].used);
        }
#endif
        if (n > SAVAGE_DMA_PAGE_SIZE)
            dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
        else
            dev_priv->dma_pages[i].used = n;
        n -= SAVAGE_DMA_PAGE_SIZE;
    }
    /* i is one past the last page touched at this point. */
    dev_priv->current_dma_page = --i;

    DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
          i, dev_priv->dma_pages[i].used, n);

    /* Make sure the hardware is done with the page being reused. */
    savage_dma_wait(dev_priv, dev_priv->current_dma_page);

    return dma_ptr;
}
0427 
/*
 * Start real command DMA for everything accumulated since the last
 * flush: pad the block, point the BCI at it, and age all flushed
 * pages with a freshly emitted event.
 */
static void savage_dma_flush(drm_savage_private_t * dev_priv)
{
    unsigned int first = dev_priv->first_dma_page;
    unsigned int cur = dev_priv->current_dma_page;
    uint16_t event;
    unsigned int wrap, pad, align, len, i;
    unsigned long phys_addr;
    BCI_LOCALS;

    /* Nothing new since the last flush. */
    if (first == cur &&
        dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
        return;

    /* pad length to multiples of 2 entries
     * align start of next DMA block to multiples of 8 entries */
    pad = -dev_priv->dma_pages[cur].used & 1;
    align = -(dev_priv->dma_pages[cur].used + pad) & 7;

    DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
          "pad=%u, align=%u\n",
          first, cur, dev_priv->dma_pages[first].flushed,
          dev_priv->dma_pages[cur].used, pad, align);

    /* pad with noops */
    if (pad) {
        uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
            cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
        dev_priv->dma_pages[cur].used += pad;
        while (pad != 0) {
            *dma_ptr++ = BCI_CMD_WAIT;
            pad--;
        }
    }

    mb();   /* commands must be visible in memory before DMA starts */

    /* do flush: physical start address and length (in entries) of the
     * not-yet-flushed region */
    phys_addr = dev_priv->cmd_dma->offset +
        (first * SAVAGE_DMA_PAGE_SIZE +
         dev_priv->dma_pages[first].flushed) * 4;
    len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
        dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;

    DRM_DEBUG("phys_addr=%lx, len=%u\n",
          phys_addr | dev_priv->dma_type, len);

    BEGIN_BCI(3);
    BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
    BCI_WRITE(phys_addr | dev_priv->dma_type);
    BCI_DMA(len);

    /* fix alignment of the start of the next block */
    dev_priv->dma_pages[cur].used += align;

    /* age DMA pages */
    event = savage_bci_emit_event(dev_priv, 0);
    wrap = dev_priv->event_wrap;
    for (i = first; i < cur; ++i) {
        SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
        dev_priv->dma_pages[i].used = 0;
        dev_priv->dma_pages[i].flushed = 0;
    }
    /* age the current page only when it's full */
    if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
        SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
        dev_priv->dma_pages[cur].used = 0;
        dev_priv->dma_pages[cur].flushed = 0;
        /* advance to next page */
        cur++;
        if (cur == dev_priv->nr_dma_pages)
            cur = 0;
        dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
    } else {
        dev_priv->first_dma_page = cur;
        /* NOTE: i == cur after the loop above, so this records the
         * current page's used count as its flushed count. */
        dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
    }
    SET_AGE(&dev_priv->last_dma_age, event, wrap);

    DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
          dev_priv->dma_pages[cur].used,
          dev_priv->dma_pages[cur].flushed);
}
0510 
/*
 * Flush for the faked DMA buffer: instead of starting a DMA transfer,
 * feed the buffered command words to the hardware directly through
 * the BCI, one word at a time.
 */
static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
{
    unsigned int i, j;
    BCI_LOCALS;

    /* Nothing buffered. */
    if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
        dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
        return;

    DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
          dev_priv->first_dma_page, dev_priv->current_dma_page,
          dev_priv->dma_pages[dev_priv->current_dma_page].used);

    for (i = dev_priv->first_dma_page;
         i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
         ++i) {
        uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
            i * SAVAGE_DMA_PAGE_SIZE;
#if SAVAGE_DMA_DEBUG
        /* Sanity check: all pages except the last one must be full. */
        if (i < dev_priv->current_dma_page &&
            dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
            DRM_ERROR("partial DMA page %u: used=%u",
                  i, dev_priv->dma_pages[i].used);
        }
#endif
        /* Copy the page's contents into the BCI word by word. */
        BEGIN_BCI(dev_priv->dma_pages[i].used);
        for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
            BCI_WRITE(dma_ptr[j]);
        }
        dev_priv->dma_pages[i].used = 0;
    }

    /* reset to first page */
    dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}
0547 
0548 int savage_driver_load(struct drm_device *dev, unsigned long chipset)
0549 {
0550     struct pci_dev *pdev = to_pci_dev(dev->dev);
0551     drm_savage_private_t *dev_priv;
0552 
0553     dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
0554     if (dev_priv == NULL)
0555         return -ENOMEM;
0556 
0557     dev->dev_private = (void *)dev_priv;
0558 
0559     dev_priv->chipset = (enum savage_family)chipset;
0560 
0561     pci_set_master(pdev);
0562 
0563     return 0;
0564 }
0565 
0566 
0567 /*
0568  * Initialize mappings. On Savage4 and SavageIX the alignment
0569  * and size of the aperture is not suitable for automatic MTRR setup
0570  * in drm_legacy_addmap. Therefore we add them manually before the maps are
0571  * initialized, and tear them down on last close.
0572  */
int savage_driver_firstopen(struct drm_device *dev)
{
    drm_savage_private_t *dev_priv = dev->dev_private;
    struct pci_dev *pdev = to_pci_dev(dev->dev);
    unsigned long mmio_base, fb_base, fb_size, aperture_base;
    int ret = 0;

    if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
        /* Savage3D/MX/IX: FB, MMIO and aperture all live inside the
         * single 128MB BAR 0 at fixed offsets. */
        fb_base = pci_resource_start(pdev, 0);
        fb_size = SAVAGE_FB_SIZE_S3;
        mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
        aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
        /* this should always be true */
        if (pci_resource_len(pdev, 0) == 0x08000000) {
            /* Don't make MMIO write-combining! We need 3
             * MTRRs. */
            dev_priv->mtrr_handles[0] =
                arch_phys_wc_add(fb_base, 0x01000000);
            dev_priv->mtrr_handles[1] =
                arch_phys_wc_add(fb_base + 0x02000000,
                         0x02000000);
            dev_priv->mtrr_handles[2] =
                arch_phys_wc_add(fb_base + 0x04000000,
                        0x04000000);
        } else {
            DRM_ERROR("strange pci_resource_len %08llx\n",
                  (unsigned long long)
                  pci_resource_len(pdev, 0));
        }
    } else if (dev_priv->chipset != S3_SUPERSAVAGE &&
           dev_priv->chipset != S3_SAVAGE2000) {
        /* Savage4 family: MMIO in BAR 0, FB plus aperture in BAR 1. */
        mmio_base = pci_resource_start(pdev, 0);
        fb_base = pci_resource_start(pdev, 1);
        fb_size = SAVAGE_FB_SIZE_S4;
        aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
        /* this should always be true */
        if (pci_resource_len(pdev, 1) == 0x08000000) {
            /* Can use one MTRR to cover both fb and
             * aperture. */
            dev_priv->mtrr_handles[0] =
                arch_phys_wc_add(fb_base,
                         0x08000000);
        } else {
            DRM_ERROR("strange pci_resource_len %08llx\n",
                  (unsigned long long)
                  pci_resource_len(pdev, 1));
        }
    } else {
        /* SuperSavage / Savage2000: separate BARs for MMIO (0),
         * FB (1) and aperture (2). */
        mmio_base = pci_resource_start(pdev, 0);
        fb_base = pci_resource_start(pdev, 1);
        fb_size = pci_resource_len(pdev, 1);
        aperture_base = pci_resource_start(pdev, 2);
        /* Automatic MTRR setup will do the right thing. */
    }

    ret = drm_legacy_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE,
                _DRM_REGISTERS, _DRM_READ_ONLY,
                &dev_priv->mmio);
    if (ret)
        return ret;

    ret = drm_legacy_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
                _DRM_WRITE_COMBINING, &dev_priv->fb);
    if (ret)
        return ret;

    ret = drm_legacy_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
                _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
                &dev_priv->aperture);
    return ret;
}
0644 
0645 /*
0646  * Delete MTRRs and free device-private data.
0647  */
0648 void savage_driver_lastclose(struct drm_device *dev)
0649 {
0650     drm_savage_private_t *dev_priv = dev->dev_private;
0651     int i;
0652 
0653     for (i = 0; i < 3; ++i) {
0654         arch_phys_wc_del(dev_priv->mtrr_handles[i]);
0655         dev_priv->mtrr_handles[i] = 0;
0656     }
0657 }
0658 
0659 void savage_driver_unload(struct drm_device *dev)
0660 {
0661     drm_savage_private_t *dev_priv = dev->dev_private;
0662 
0663     kfree(dev_priv);
0664 }
0665 
0666 static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
0667 {
0668     drm_savage_private_t *dev_priv = dev->dev_private;
0669 
0670     if (init->fb_bpp != 16 && init->fb_bpp != 32) {
0671         DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
0672         return -EINVAL;
0673     }
0674     if (init->depth_bpp != 16 && init->depth_bpp != 32) {
0675         DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
0676         return -EINVAL;
0677     }
0678     if (init->dma_type != SAVAGE_DMA_AGP &&
0679         init->dma_type != SAVAGE_DMA_PCI) {
0680         DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
0681         return -EINVAL;
0682     }
0683 
0684     dev_priv->cob_size = init->cob_size;
0685     dev_priv->bci_threshold_lo = init->bci_threshold_lo;
0686     dev_priv->bci_threshold_hi = init->bci_threshold_hi;
0687     dev_priv->dma_type = init->dma_type;
0688 
0689     dev_priv->fb_bpp = init->fb_bpp;
0690     dev_priv->front_offset = init->front_offset;
0691     dev_priv->front_pitch = init->front_pitch;
0692     dev_priv->back_offset = init->back_offset;
0693     dev_priv->back_pitch = init->back_pitch;
0694     dev_priv->depth_bpp = init->depth_bpp;
0695     dev_priv->depth_offset = init->depth_offset;
0696     dev_priv->depth_pitch = init->depth_pitch;
0697 
0698     dev_priv->texture_offset = init->texture_offset;
0699     dev_priv->texture_size = init->texture_size;
0700 
0701     dev_priv->sarea = drm_legacy_getsarea(dev);
0702     if (!dev_priv->sarea) {
0703         DRM_ERROR("could not find sarea!\n");
0704         savage_do_cleanup_bci(dev);
0705         return -EINVAL;
0706     }
0707     if (init->status_offset != 0) {
0708         dev_priv->status = drm_legacy_findmap(dev, init->status_offset);
0709         if (!dev_priv->status) {
0710             DRM_ERROR("could not find shadow status region!\n");
0711             savage_do_cleanup_bci(dev);
0712             return -EINVAL;
0713         }
0714     } else {
0715         dev_priv->status = NULL;
0716     }
0717     if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
0718         dev->agp_buffer_token = init->buffers_offset;
0719         dev->agp_buffer_map = drm_legacy_findmap(dev,
0720                                init->buffers_offset);
0721         if (!dev->agp_buffer_map) {
0722             DRM_ERROR("could not find DMA buffer region!\n");
0723             savage_do_cleanup_bci(dev);
0724             return -EINVAL;
0725         }
0726         drm_legacy_ioremap(dev->agp_buffer_map, dev);
0727         if (!dev->agp_buffer_map->handle) {
0728             DRM_ERROR("failed to ioremap DMA buffer region!\n");
0729             savage_do_cleanup_bci(dev);
0730             return -ENOMEM;
0731         }
0732     }
0733     if (init->agp_textures_offset) {
0734         dev_priv->agp_textures =
0735             drm_legacy_findmap(dev, init->agp_textures_offset);
0736         if (!dev_priv->agp_textures) {
0737             DRM_ERROR("could not find agp texture region!\n");
0738             savage_do_cleanup_bci(dev);
0739             return -EINVAL;
0740         }
0741     } else {
0742         dev_priv->agp_textures = NULL;
0743     }
0744 
0745     if (init->cmd_dma_offset) {
0746         if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
0747             DRM_ERROR("command DMA not supported on "
0748                   "Savage3D/MX/IX.\n");
0749             savage_do_cleanup_bci(dev);
0750             return -EINVAL;
0751         }
0752         if (dev->dma && dev->dma->buflist) {
0753             DRM_ERROR("command and vertex DMA not supported "
0754                   "at the same time.\n");
0755             savage_do_cleanup_bci(dev);
0756             return -EINVAL;
0757         }
0758         dev_priv->cmd_dma = drm_legacy_findmap(dev, init->cmd_dma_offset);
0759         if (!dev_priv->cmd_dma) {
0760             DRM_ERROR("could not find command DMA region!\n");
0761             savage_do_cleanup_bci(dev);
0762             return -EINVAL;
0763         }
0764         if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
0765             if (dev_priv->cmd_dma->type != _DRM_AGP) {
0766                 DRM_ERROR("AGP command DMA region is not a "
0767                       "_DRM_AGP map!\n");
0768                 savage_do_cleanup_bci(dev);
0769                 return -EINVAL;
0770             }
0771             drm_legacy_ioremap(dev_priv->cmd_dma, dev);
0772             if (!dev_priv->cmd_dma->handle) {
0773                 DRM_ERROR("failed to ioremap command "
0774                       "DMA region!\n");
0775                 savage_do_cleanup_bci(dev);
0776                 return -ENOMEM;
0777             }
0778         } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
0779             DRM_ERROR("PCI command DMA region is not a "
0780                   "_DRM_CONSISTENT map!\n");
0781             savage_do_cleanup_bci(dev);
0782             return -EINVAL;
0783         }
0784     } else {
0785         dev_priv->cmd_dma = NULL;
0786     }
0787 
0788     dev_priv->dma_flush = savage_dma_flush;
0789     if (!dev_priv->cmd_dma) {
0790         DRM_DEBUG("falling back to faked command DMA.\n");
0791         dev_priv->fake_dma.offset = 0;
0792         dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
0793         dev_priv->fake_dma.type = _DRM_SHM;
0794         dev_priv->fake_dma.handle = kmalloc(SAVAGE_FAKE_DMA_SIZE,
0795                             GFP_KERNEL);
0796         if (!dev_priv->fake_dma.handle) {
0797             DRM_ERROR("could not allocate faked DMA buffer!\n");
0798             savage_do_cleanup_bci(dev);
0799             return -ENOMEM;
0800         }
0801         dev_priv->cmd_dma = &dev_priv->fake_dma;
0802         dev_priv->dma_flush = savage_fake_dma_flush;
0803     }
0804 
0805     dev_priv->sarea_priv =
0806         (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle +
0807                     init->sarea_priv_offset);
0808 
0809     /* setup bitmap descriptors */
0810     {
0811         unsigned int color_tile_format;
0812         unsigned int depth_tile_format;
0813         unsigned int front_stride, back_stride, depth_stride;
0814         if (dev_priv->chipset <= S3_SAVAGE4) {
0815             color_tile_format = dev_priv->fb_bpp == 16 ?
0816                 SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
0817             depth_tile_format = dev_priv->depth_bpp == 16 ?
0818                 SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
0819         } else {
0820             color_tile_format = SAVAGE_BD_TILE_DEST;
0821             depth_tile_format = SAVAGE_BD_TILE_DEST;
0822         }
0823         front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
0824         back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
0825         depth_stride =
0826             dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);
0827 
0828         dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
0829             (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
0830             (color_tile_format << SAVAGE_BD_TILE_SHIFT);
0831 
0832         dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
0833             (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
0834             (color_tile_format << SAVAGE_BD_TILE_SHIFT);
0835 
0836         dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
0837             (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
0838             (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
0839     }
0840 
0841     /* setup status and bci ptr */
0842     dev_priv->event_counter = 0;
0843     dev_priv->event_wrap = 0;
0844     dev_priv->bci_ptr = (volatile uint32_t *)
0845         ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
0846     if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
0847         dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
0848     } else {
0849         dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
0850     }
0851     if (dev_priv->status != NULL) {
0852         dev_priv->status_ptr =
0853             (volatile uint32_t *)dev_priv->status->handle;
0854         dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
0855         dev_priv->wait_evnt = savage_bci_wait_event_shadow;
0856         dev_priv->status_ptr[1023] = dev_priv->event_counter;
0857     } else {
0858         dev_priv->status_ptr = NULL;
0859         if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
0860             dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
0861         } else {
0862             dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
0863         }
0864         dev_priv->wait_evnt = savage_bci_wait_event_reg;
0865     }
0866 
0867     /* cliprect functions */
0868     if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
0869         dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
0870     else
0871         dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;
0872 
0873     if (savage_freelist_init(dev) < 0) {
0874         DRM_ERROR("could not initialize freelist\n");
0875         savage_do_cleanup_bci(dev);
0876         return -ENOMEM;
0877     }
0878 
0879     if (savage_dma_init(dev_priv) < 0) {
0880         DRM_ERROR("could not initialize command DMA\n");
0881         savage_do_cleanup_bci(dev);
0882         return -ENOMEM;
0883     }
0884 
0885     return 0;
0886 }
0887 
0888 static int savage_do_cleanup_bci(struct drm_device * dev)
0889 {
0890     drm_savage_private_t *dev_priv = dev->dev_private;
0891 
0892     if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
0893         kfree(dev_priv->fake_dma.handle);
0894     } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
0895            dev_priv->cmd_dma->type == _DRM_AGP &&
0896            dev_priv->dma_type == SAVAGE_DMA_AGP)
0897         drm_legacy_ioremapfree(dev_priv->cmd_dma, dev);
0898 
0899     if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
0900         dev->agp_buffer_map && dev->agp_buffer_map->handle) {
0901         drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
0902         /* make sure the next instance (which may be running
0903          * in PCI mode) doesn't try to use an old
0904          * agp_buffer_map. */
0905         dev->agp_buffer_map = NULL;
0906     }
0907 
0908     kfree(dev_priv->dma_pages);
0909 
0910     return 0;
0911 }
0912 
0913 static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
0914 {
0915     drm_savage_init_t *init = data;
0916 
0917     LOCK_TEST_WITH_RETURN(dev, file_priv);
0918 
0919     switch (init->func) {
0920     case SAVAGE_INIT_BCI:
0921         return savage_do_init_bci(dev, init);
0922     case SAVAGE_CLEANUP_BCI:
0923         return savage_do_cleanup_bci(dev);
0924     }
0925 
0926     return -EINVAL;
0927 }
0928 
0929 static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
0930 {
0931     drm_savage_private_t *dev_priv = dev->dev_private;
0932     drm_savage_event_emit_t *event = data;
0933 
0934     DRM_DEBUG("\n");
0935 
0936     LOCK_TEST_WITH_RETURN(dev, file_priv);
0937 
0938     event->count = savage_bci_emit_event(dev_priv, event->flags);
0939     event->count |= dev_priv->event_wrap << 16;
0940 
0941     return 0;
0942 }
0943 
/* SAVAGE_BCI_EVENT_WAIT ioctl: wait until the hardware has processed
 * the event identified by event->count (low 16 bits: event number,
 * high 16 bits: wrap counter, matching the encoding produced by
 * savage_bci_event_emit). Returns 0 immediately if the event has
 * already passed; otherwise blocks via the chip-specific wait_evnt
 * callback. */
static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
    drm_savage_private_t *dev_priv = dev->dev_private;
    drm_savage_event_wait_t *event = data;
    unsigned int event_e, hw_e;  /* event number: requested / last seen by HW */
    unsigned int event_w, hw_w;  /* wrap counter: requested / current HW wrap */

    DRM_DEBUG("\n");

    UPDATE_EVENT_COUNTER();
    /* Read the hardware's last completed event number, preferring the
     * shadow status page when one is set up. */
    if (dev_priv->status_ptr)
        hw_e = dev_priv->status_ptr[1] & 0xffff;
    else
        hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
    hw_w = dev_priv->event_wrap;
    if (hw_e > dev_priv->event_counter)
        hw_w--;     /* hardware hasn't passed the last wrap yet */

    event_e = event->count & 0xffff;
    event_w = event->count >> 16;

    /* Don't need to wait if
     * - event counter wrapped since the event was emitted or
     * - the hardware has advanced up to or over the event to wait for.
     */
    if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
        return 0;
    else
        return dev_priv->wait_evnt(dev_priv, event_e);
}
0974 
0975 /*
0976  * DMA buffer management
0977  */
0978 
0979 static int savage_bci_get_buffers(struct drm_device *dev,
0980                   struct drm_file *file_priv,
0981                   struct drm_dma *d)
0982 {
0983     struct drm_buf *buf;
0984     int i;
0985 
0986     for (i = d->granted_count; i < d->request_count; i++) {
0987         buf = savage_freelist_get(dev);
0988         if (!buf)
0989             return -EAGAIN;
0990 
0991         buf->file_priv = file_priv;
0992 
0993         if (copy_to_user(&d->request_indices[i],
0994                      &buf->idx, sizeof(buf->idx)))
0995             return -EFAULT;
0996         if (copy_to_user(&d->request_sizes[i],
0997                      &buf->total, sizeof(buf->total)))
0998             return -EFAULT;
0999 
1000         d->granted_count++;
1001     }
1002     return 0;
1003 }
1004 
1005 int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
1006 {
1007     struct drm_device_dma *dma = dev->dma;
1008     struct drm_dma *d = data;
1009     int ret = 0;
1010 
1011     LOCK_TEST_WITH_RETURN(dev, file_priv);
1012 
1013     /* Please don't send us buffers.
1014      */
1015     if (d->send_count != 0) {
1016         DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1017               task_pid_nr(current), d->send_count);
1018         return -EINVAL;
1019     }
1020 
1021     /* We'll send you buffers.
1022      */
1023     if (d->request_count < 0 || d->request_count > dma->buf_count) {
1024         DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1025               task_pid_nr(current), d->request_count, dma->buf_count);
1026         return -EINVAL;
1027     }
1028 
1029     d->granted_count = 0;
1030 
1031     if (d->request_count) {
1032         ret = savage_bci_get_buffers(dev, file_priv, d);
1033     }
1034 
1035     return ret;
1036 }
1037 
1038 void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
1039 {
1040     struct drm_device_dma *dma = dev->dma;
1041     drm_savage_private_t *dev_priv = dev->dev_private;
1042     int release_idlelock = 0;
1043     int i;
1044 
1045     if (!dma)
1046         return;
1047     if (!dev_priv)
1048         return;
1049     if (!dma->buflist)
1050         return;
1051 
1052     if (file_priv->master && file_priv->master->lock.hw_lock) {
1053         drm_legacy_idlelock_take(&file_priv->master->lock);
1054         release_idlelock = 1;
1055     }
1056 
1057     for (i = 0; i < dma->buf_count; i++) {
1058         struct drm_buf *buf = dma->buflist[i];
1059         drm_savage_buf_priv_t *buf_priv = buf->dev_private;
1060 
1061         if (buf->file_priv == file_priv && buf_priv &&
1062             buf_priv->next == NULL && buf_priv->prev == NULL) {
1063             uint16_t event;
1064             DRM_DEBUG("reclaimed from client\n");
1065             event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
1066             SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
1067             savage_freelist_put(dev, buf);
1068         }
1069     }
1070 
1071     if (release_idlelock)
1072         drm_legacy_idlelock_release(&file_priv->master->lock);
1073 }
1074 
/* Ioctl dispatch table for the savage driver. BCI_INIT is restricted
 * to the root-privileged master; the remaining ioctls only require an
 * authenticated client. */
const struct drm_ioctl_desc savage_ioctls[] = {
    DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
    DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
    DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
    DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};

/* Number of entries in savage_ioctls, consumed by the drm core. */
int savage_max_ioctl = ARRAY_SIZE(savage_ioctls);