Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0 OR MIT
0002 /**************************************************************************
0003  *
0004  * Copyright 2009-2020 VMware, Inc., Palo Alto, CA., USA
0005  *
0006  * Permission is hereby granted, free of charge, to any person obtaining a
0007  * copy of this software and associated documentation files (the
0008  * "Software"), to deal in the Software without restriction, including
0009  * without limitation the rights to use, copy, modify, merge, publish,
0010  * distribute, sub license, and/or sell copies of the Software, and to
0011  * permit persons to whom the Software is furnished to do so, subject to
0012  * the following conditions:
0013  *
0014  * The above copyright notice and this permission notice (including the
0015  * next paragraph) shall be included in all copies or substantial portions
0016  * of the Software.
0017  *
0018  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0019  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0020  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
0021  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
0022  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
0023  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
0024  * USE OR OTHER DEALINGS IN THE SOFTWARE.
0025  *
0026  **************************************************************************/
0027 
0028 #include <linux/sched/signal.h>
0029 
0030 #include <drm/ttm/ttm_placement.h>
0031 
0032 #include "vmwgfx_drv.h"
0033 #include "vmwgfx_devcaps.h"
0034 
0035 bool vmw_supports_3d(struct vmw_private *dev_priv)
0036 {
0037     uint32_t fifo_min, hwversion;
0038     const struct vmw_fifo_state *fifo = dev_priv->fifo;
0039 
0040     if (!(dev_priv->capabilities & SVGA_CAP_3D))
0041         return false;
0042 
0043     if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
0044         uint32_t result;
0045 
0046         if (!dev_priv->has_mob)
0047             return false;
0048 
0049         result = vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_3D);
0050 
0051         return (result != 0);
0052     }
0053 
0054     if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
0055         return false;
0056 
0057     BUG_ON(vmw_is_svga_v3(dev_priv));
0058 
0059     fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
0060     if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
0061         return false;
0062 
0063     hwversion = vmw_fifo_mem_read(dev_priv,
0064                       ((fifo->capabilities &
0065                     SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
0066                            SVGA_FIFO_3D_HWVERSION_REVISED :
0067                            SVGA_FIFO_3D_HWVERSION));
0068 
0069     if (hwversion == 0)
0070         return false;
0071 
0072     if (hwversion < SVGA3D_HWVERSION_WS8_B1)
0073         return false;
0074 
0075     /* Legacy Display Unit does not support surfaces */
0076     if (dev_priv->active_display_unit == vmw_du_legacy)
0077         return false;
0078 
0079     return true;
0080 }
0081 
0082 bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
0083 {
0084     uint32_t caps;
0085 
0086     if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
0087         return false;
0088 
0089     caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
0090     if (caps & SVGA_FIFO_CAP_PITCHLOCK)
0091         return true;
0092 
0093     return false;
0094 }
0095 
0096 struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
0097 {
0098     struct vmw_fifo_state *fifo;
0099     uint32_t max;
0100     uint32_t min;
0101 
0102     if (!dev_priv->fifo_mem)
0103         return NULL;
0104 
0105     fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
0106     if (!fifo)
0107         return ERR_PTR(-ENOMEM);
0108     fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
0109     fifo->static_buffer = vmalloc(fifo->static_buffer_size);
0110     if (unlikely(fifo->static_buffer == NULL)) {
0111         kfree(fifo);
0112         return ERR_PTR(-ENOMEM);
0113     }
0114 
0115     fifo->dynamic_buffer = NULL;
0116     fifo->reserved_size = 0;
0117     fifo->using_bounce_buffer = false;
0118 
0119     mutex_init(&fifo->fifo_mutex);
0120     init_rwsem(&fifo->rwsem);
0121     min = 4;
0122     if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
0123         min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
0124     min <<= 2;
0125 
0126     if (min < PAGE_SIZE)
0127         min = PAGE_SIZE;
0128 
0129     vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min);
0130     vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size);
0131     wmb();
0132     vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min);
0133     vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min);
0134     vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0);
0135     mb();
0136 
0137     vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
0138 
0139     max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
0140     min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
0141     fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
0142 
0143     drm_info(&dev_priv->drm,
0144          "Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
0145          (unsigned int) max,
0146          (unsigned int) min,
0147          (unsigned int) fifo->capabilities);
0148 
0149     if (unlikely(min >= max)) {
0150         drm_warn(&dev_priv->drm,
0151              "FIFO memory is not usable. Driver failed to initialize.");
0152         return ERR_PTR(-ENXIO);
0153     }
0154 
0155     return fifo;
0156 }
0157 
0158 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
0159 {
0160     u32 *fifo_mem = dev_priv->fifo_mem;
0161     if (fifo_mem && cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
0162         vmw_write(dev_priv, SVGA_REG_SYNC, reason);
0163 
0164 }
0165 
0166 void vmw_fifo_destroy(struct vmw_private *dev_priv)
0167 {
0168     struct vmw_fifo_state *fifo = dev_priv->fifo;
0169 
0170     if (!fifo)
0171         return;
0172 
0173     if (likely(fifo->static_buffer != NULL)) {
0174         vfree(fifo->static_buffer);
0175         fifo->static_buffer = NULL;
0176     }
0177 
0178     if (likely(fifo->dynamic_buffer != NULL)) {
0179         vfree(fifo->dynamic_buffer);
0180         fifo->dynamic_buffer = NULL;
0181     }
0182     kfree(fifo);
0183     dev_priv->fifo = NULL;
0184 }
0185 
0186 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
0187 {
0188     uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
0189     uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
0190     uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
0191     uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
0192 
0193     return ((max - next_cmd) + (stop - min) <= bytes);
0194 }
0195 
/*
 * vmw_fifo_wait_noirq - Wait for fifo space by polling, for devices
 * without fifo-progress interrupts.
 *
 * @dev_priv: Device private.
 * @bytes: Number of bytes of fifo space to wait for.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Returns 0 on success, -EBUSY on timeout (treated as a device lockup),
 * or -ERESTARTSYS when interrupted by a signal.
 */
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		/* Queue ourselves before re-checking the condition to avoid
		 * missing a wakeup between the check and the sleep. */
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		/* Poll: sleep one tick, then re-check fifo space. */
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	/* Other waiters may also be able to make progress now. */
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}
0228 
/*
 * vmw_fifo_wait - Wait for @bytes of fifo space to become available.
 *
 * @dev_priv: Device private.
 * @bytes: Number of bytes of fifo space to wait for.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Pings the host with SVGA_SYNC_FIFOFULL, then either polls (devices
 * without SVGA_CAP_IRQMASK) or sleeps on the fifo queue until
 * fifo-progress interrupts report enough space.
 *
 * Returns 0 on success, -EBUSY on timeout, -ERESTARTSYS on signal.
 */
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	/* Ask the host to drain the fifo. */
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	/* Enable fifo-progress interrupts while we sleep. */
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
			       &dev_priv->fifo_queue_waiters);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	/* Fold wait_event's long result into an errno-style int:
	 * 0 -> timed out, >0 -> condition met, <0 -> -ERESTARTSYS. */
	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
				  &dev_priv->fifo_queue_waiters);

	return ret;
}
0265 
/*
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) on two conditions:
 *  If it timeouts waiting for fifo space, or if @bytes is larger than the
 *   available fifo space.
 *
 * On success the returned pointer is either directly into the fifo MMIO
 * area (when the command fits contiguously) or into a bounce buffer
 * (static for small reservations, vmalloc'd otherwise) that is copied to
 * the fifo at commit time. Note: fifo_mutex is taken here and held across
 * a successful return; it is released by vmw_local_fifo_commit() (or here
 * on the error path).
 *
 * Returns:
 *   Pointer to the fifo, or null on error (possible hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	u32  *fifo_mem = dev_priv->fifo_mem;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);

	/* A request as large as the whole fifo can never succeed. */
	if (unlikely(bytes >= (max - min)))
		goto out_err;

	/* Nested reservations are a driver bug. */
	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			/* Write pointer at or ahead of read pointer: the
			 * contiguous free region is [next_cmd, max), unless
			 * filling exactly to max would collide with stop
			 * sitting at min. */
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				/* Enough total space, but it wraps around:
				 * stage in a bounce buffer. */
				need_bounce = true;

		} else {

			/* Write pointer behind read pointer: free region is
			 * [next_cmd, stop). */
			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			/* Without SVGA_FIFO_CAP_RESERVE only single-word
			 * in-place writes are allowed. */
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					vmw_fifo_mem_write(dev_priv,
							   SVGA_FIFO_RESERVED,
							   bytes);
				return (void __force *) (fifo_mem +
							 (next_cmd >> 2));
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				if (!fifo_state->dynamic_buffer)
					goto out_err;
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);

	return NULL;
}
0363 
0364 void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes,
0365               int ctx_id)
0366 {
0367     void *ret;
0368 
0369     if (dev_priv->cman)
0370         ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
0371                      ctx_id, false, NULL);
0372     else if (ctx_id == SVGA3D_INVALID_ID)
0373         ret = vmw_local_fifo_reserve(dev_priv, bytes);
0374     else {
0375         WARN(1, "Command buffer has not been allocated.\n");
0376         ret = NULL;
0377     }
0378     if (IS_ERR_OR_NULL(ret))
0379         return NULL;
0380 
0381     return ret;
0382 }
0383 
/*
 * vmw_fifo_res_copy - Copy a bounced command into the fifo using the
 * RESERVED-register protocol (SVGA_FIFO_CAP_RESERVE).
 *
 * @fifo_state: Fifo state holding the bounce buffer.
 * @vmw: Device private.
 * @next_cmd: Current fifo write offset in bytes.
 * @max: Fifo end offset.
 * @min: Fifo start offset.
 * @bytes: Number of bytes to copy.
 *
 * The copy may wrap: the first chunk goes at @next_cmd up to @max, the
 * remainder at @min.
 */
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      struct vmw_private *vmw,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	u32 *fifo_mem = vmw->fifo_mem;
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	/* Source is whichever bounce buffer the reserve step handed out. */
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
		fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	/* Publish the reservation size before the data becomes visible. */
	vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes);
	mb();
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}
0405 
/*
 * vmw_fifo_slow_copy - Copy a bounced command into the fifo one 32-bit
 * word at a time, for devices without SVGA_FIFO_CAP_RESERVE.
 *
 * @fifo_state: Fifo state holding the bounce buffer.
 * @vmw: Device private.
 * @next_cmd: Current fifo write offset in bytes.
 * @max: Fifo end offset (wraps to @min).
 * @min: Fifo start offset.
 * @bytes: Number of bytes to copy.
 *
 * NEXT_CMD is advanced after every word, with barriers on both sides, so
 * the host never observes a command word before the pointer covering it.
 */
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       struct vmw_private *vmw,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
		fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++);
		next_cmd += sizeof(uint32_t);
		/* Wrap the write pointer at the end of the fifo. */
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd);
		mb();
		bytes -= sizeof(uint32_t);
	}
}
0425 
/*
 * vmw_local_fifo_commit - Commit @bytes previously reserved with
 * vmw_local_fifo_reserve().
 *
 * @dev_priv: Device private.
 * @bytes: Number of bytes to commit (must be word-aligned and not exceed
 *	the reservation).
 *
 * Flushes any bounce buffer into the fifo, advances NEXT_CMD, clears the
 * RESERVED register, pings the host, and releases the fifo_mutex taken by
 * the reserve step.
 */
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		/* Pick the copy protocol matching the device's caps. */
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, dev_priv,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, dev_priv,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	/* The slow-copy path already advanced NEXT_CMD word by word;
	 * otherwise advance it here, wrapping past the fifo end. */
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd);
	}

	if (reserveable)
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	/* Pairs with the mutex_lock() in vmw_local_fifo_reserve(). */
	mutex_unlock(&fifo_state->fifo_mutex);
}
0470 
0471 void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
0472 {
0473     if (dev_priv->cman)
0474         vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
0475     else
0476         vmw_local_fifo_commit(dev_priv, bytes);
0477 }
0478 
0479 
0480 /**
0481  * vmw_cmd_commit_flush - Commit fifo space and flush any buffered commands.
0482  *
0483  * @dev_priv: Pointer to device private structure.
0484  * @bytes: Number of bytes to commit.
0485  */
0486 void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
0487 {
0488     if (dev_priv->cman)
0489         vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
0490     else
0491         vmw_local_fifo_commit(dev_priv, bytes);
0492 }
0493 
0494 /**
0495  * vmw_cmd_flush - Flush any buffered commands and make sure command processing
0496  * starts.
0497  *
0498  * @dev_priv: Pointer to device private structure.
0499  * @interruptible: Whether to wait interruptible if function needs to sleep.
0500  */
0501 int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
0502 {
0503     might_sleep();
0504 
0505     if (dev_priv->cman)
0506         return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
0507     else
0508         return 0;
0509 }
0510 
/*
 * vmw_cmd_send_fence - Emit a fence command and return its sequence number.
 *
 * @dev_priv: Device private.
 * @seqno: Outputs the sequence number associated with the fence.
 *
 * On reservation failure, falls back to a blocking wait on the last known
 * seqno and returns -ENOMEM. On devices without hardware fences the
 * reservation is relinquished (zero-byte commit) and waiting is emulated
 * by the irq code. Returns 0 on success.
 */
int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	/* Command id word followed by the fence body. */
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = VMW_CMD_RESERVE(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		/* Best effort: wait for outstanding work anyway. */
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	/* Seqno 0 is reserved; skip it when the counter wraps. */
	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!vmw_has_fences(dev_priv)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_cmd_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_cmd_commit_flush(dev_priv, bytes);
	vmw_update_seqno(dev_priv);

out_err:
	return ret;
}
0551 
0552 /**
0553  * vmw_cmd_emit_dummy_legacy_query - emits a dummy query to the fifo using
0554  * legacy query commands.
0555  *
0556  * @dev_priv: The device private structure.
0557  * @cid: The hardware context id used for the query.
0558  *
0559  * See the vmw_cmd_emit_dummy_query documentation.
0560  */
0561 static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
0562                         uint32_t cid)
0563 {
0564     /*
0565      * A query wait without a preceding query end will
0566      * actually finish all queries for this cid
0567      * without writing to the query result structure.
0568      */
0569 
0570     struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
0571     struct {
0572         SVGA3dCmdHeader header;
0573         SVGA3dCmdWaitForQuery body;
0574     } *cmd;
0575 
0576     cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
0577     if (unlikely(cmd == NULL))
0578         return -ENOMEM;
0579 
0580     cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
0581     cmd->header.size = sizeof(cmd->body);
0582     cmd->body.cid = cid;
0583     cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
0584 
0585     if (bo->resource->mem_type == TTM_PL_VRAM) {
0586         cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
0587         cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
0588     } else {
0589         cmd->body.guestResult.gmrId = bo->resource->start;
0590         cmd->body.guestResult.offset = 0;
0591     }
0592 
0593     vmw_cmd_commit(dev_priv, sizeof(*cmd));
0594 
0595     return 0;
0596 }
0597 
0598 /**
0599  * vmw_cmd_emit_dummy_gb_query - emits a dummy query to the fifo using
0600  * guest-backed resource query commands.
0601  *
0602  * @dev_priv: The device private structure.
0603  * @cid: The hardware context id used for the query.
0604  *
0605  * See the vmw_cmd_emit_dummy_query documentation.
0606  */
0607 static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
0608                        uint32_t cid)
0609 {
0610     /*
0611      * A query wait without a preceding query end will
0612      * actually finish all queries for this cid
0613      * without writing to the query result structure.
0614      */
0615 
0616     struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
0617     struct {
0618         SVGA3dCmdHeader header;
0619         SVGA3dCmdWaitForGBQuery body;
0620     } *cmd;
0621 
0622     cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
0623     if (unlikely(cmd == NULL))
0624         return -ENOMEM;
0625 
0626     cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
0627     cmd->header.size = sizeof(cmd->body);
0628     cmd->body.cid = cid;
0629     cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
0630     BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
0631     cmd->body.mobid = bo->resource->start;
0632     cmd->body.offset = 0;
0633 
0634     vmw_cmd_commit(dev_priv, sizeof(*cmd));
0635 
0636     return 0;
0637 }
0638 
0639 
0640 /**
0641  * vmw_cmd_emit_dummy_query - emits a dummy query to the fifo using
0642  * appropriate resource query commands.
0643  *
0644  * @dev_priv: The device private structure.
0645  * @cid: The hardware context id used for the query.
0646  *
0647  * This function is used to emit a dummy occlusion query with
0648  * no primitives rendered between query begin and query end.
0649  * It's used to provide a query barrier, in order to know that when
0650  * this query is finished, all preceding queries are also finished.
0651  *
0652  * A Query results structure should have been initialized at the start
0653  * of the dev_priv->dummy_query_bo buffer object. And that buffer object
0654  * must also be either reserved or pinned when this function is called.
0655  *
0656  * Returns -ENOMEM on failure to reserve fifo space.
0657  */
0658 int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
0659                   uint32_t cid)
0660 {
0661     if (dev_priv->has_mob)
0662         return vmw_cmd_emit_dummy_gb_query(dev_priv, cid);
0663 
0664     return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid);
0665 }
0666 
0667 
0668 /**
0669  * vmw_cmd_supported - returns true if the given device supports
0670  * command queues.
0671  *
0672  * @vmw: The device private structure.
0673  *
0674  * Returns true if we can issue commands.
0675  */
0676 bool vmw_cmd_supported(struct vmw_private *vmw)
0677 {
0678     bool has_cmdbufs =
0679         (vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
0680                       SVGA_CAP_CMD_BUFFERS_2)) != 0;
0681     if (vmw_is_svga_v3(vmw))
0682         return (has_cmdbufs &&
0683             (vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0);
0684     /*
0685      * We have FIFO cmd's
0686      */
0687     return has_cmdbufs || vmw->fifo_mem != NULL;
0688 }