0001 // SPDX-License-Identifier: GPL-2.0 OR MIT
0002 /**************************************************************************
0003  *
0004  * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
0005  *
0006  * Permission is hereby granted, free of charge, to any person obtaining a
0007  * copy of this software and associated documentation files (the
0008  * "Software"), to deal in the Software without restriction, including
0009  * without limitation the rights to use, copy, modify, merge, publish,
0010  * distribute, sub license, and/or sell copies of the Software, and to
0011  * permit persons to whom the Software is furnished to do so, subject to
0012  * the following conditions:
0013  *
0014  * The above copyright notice and this permission notice (including the
0015  * next paragraph) shall be included in all copies or substantial portions
0016  * of the Software.
0017  *
0018  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0019  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0020  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
0021  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
0022  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
0023  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
0024  * USE OR OTHER DEALINGS IN THE SOFTWARE.
0025  *
0026  **************************************************************************/
0027 
0028 #include <linux/dmapool.h>
0029 #include <linux/pci.h>
0030 
0031 #include <drm/ttm/ttm_bo_api.h>
0032 
0033 #include "vmwgfx_drv.h"
0034 
0035 /*
0036  * Size of inline command buffers. Try to make sure that a page size is a
0037  * multiple of the DMA pool allocation size.
0038  */
0039 #define VMW_CMDBUF_INLINE_ALIGN 64
0040 #define VMW_CMDBUF_INLINE_SIZE \
0041     (1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
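
     /*
      * A worked example of the sizing above, assuming (for illustration only)
      * that sizeof(SVGACBHeader) is at most 64 bytes: ALIGN(sizeof(SVGACBHeader),
      * VMW_CMDBUF_INLINE_ALIGN) then evaluates to 64, so VMW_CMDBUF_INLINE_SIZE
      * is 1024 - 64 = 960 bytes, a header plus its inline space occupies exactly
      * 1024 bytes, and four of them fit in a 4 KiB page as intended above.
      */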
0042 
0043 /**
0044  * struct vmw_cmdbuf_context - Command buffer context queues
0045  *
0046  * @submitted: List of command buffers that have been submitted to the
0047  * manager but not yet submitted to hardware.
0048  * @hw_submitted: List of command buffers submitted to hardware.
0049  * @preempted: List of preempted command buffers.
0050  * @num_hw_submitted: Number of buffers currently being processed by hardware.
0051  * @block_submission: Whether submission to hardware is currently blocked.
0052  */
0053 struct vmw_cmdbuf_context {
0054     struct list_head submitted;
0055     struct list_head hw_submitted;
0056     struct list_head preempted;
0057     unsigned num_hw_submitted;
0058     bool block_submission;
0059 };
0060 
0061 /**
0062  * struct vmw_cmdbuf_man - Command buffer manager
0063  *
0064  * @cur_mutex: Mutex protecting the command buffer used for incremental small
0065  * kernel command submissions, @cur.
0066  * @space_mutex: Mutex to protect against starvation when we allocate
0067  * main pool buffer space.
0068  * @error_mutex: Mutex to serialize the work queue error handling.
0069  * Note this is not needed if the same workqueue handler
0070  * can't race with itself...
0071  * @work: A struct work_struct implementing command buffer error handling.
0072  * Immutable.
0073  * @dev_priv: Pointer to the device private struct. Immutable.
0074  * @ctx: Array of command buffer context queues. The queues and the context
0075  * data is protected by @lock.
0076  * @error: List of command buffers that have caused device errors.
0077  * Protected by @lock.
0078  * @mm: Range manager for the command buffer space. Manager allocations and
0079  * frees are protected by @lock.
0080  * @cmd_space: Buffer object for the command buffer space, unless we were
0081  * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
0082  * @map_obj: Mapping state for @cmd_space. Immutable.
0083  * @map: Pointer to command buffer space. May be a mapped buffer object or
0084  * a contiguous coherent DMA memory allocation. Immutable.
0085  * @cur: Command buffer for small kernel command submissions. Protected by
0086  * the @cur_mutex.
0087  * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
0088  * @default_size: Default size for the @cur command buffer. Immutable.
0089  * @max_hw_submitted: Max number of in-flight command buffers the device can
0090  * handle. Immutable.
0091  * @lock: Spinlock protecting command submission queues.
0092  * @headers: Pool of DMA memory for device command buffer headers.
0093  * Internal protection.
0094  * @dheaders: Pool of DMA memory for device command buffer headers with trailing
0095  * space for inline data. Internal protection.
0096  * @alloc_queue: Wait queue for processes waiting to allocate command buffer
0097  * space.
0098  * @idle_queue: Wait queue for processes waiting for command buffer idle.
0099  * @irq_on: Whether the process function has requested irq to be turned on.
0100  * Protected by @lock.
0101  * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
0102  * allocation. Immutable.
0103  * @has_pool: Has a large pool of DMA memory which allows larger allocations.
0104  * Typically this is false only during bootstrap.
0105  * @handle: DMA address handle for the command buffer space if @using_mob is
0106  * false. Immutable.
0107  * @size: The size of the command buffer space. Immutable.
0108  * @num_contexts: Number of contexts actually enabled.
0109  */
0110 struct vmw_cmdbuf_man {
0111     struct mutex cur_mutex;
0112     struct mutex space_mutex;
0113     struct mutex error_mutex;
0114     struct work_struct work;
0115     struct vmw_private *dev_priv;
0116     struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
0117     struct list_head error;
0118     struct drm_mm mm;
0119     struct ttm_buffer_object *cmd_space;
0120     struct ttm_bo_kmap_obj map_obj;
0121     u8 *map;
0122     struct vmw_cmdbuf_header *cur;
0123     size_t cur_pos;
0124     size_t default_size;
0125     unsigned max_hw_submitted;
0126     spinlock_t lock;
0127     struct dma_pool *headers;
0128     struct dma_pool *dheaders;
0129     wait_queue_head_t alloc_queue;
0130     wait_queue_head_t idle_queue;
0131     bool irq_on;
0132     bool using_mob;
0133     bool has_pool;
0134     dma_addr_t handle;
0135     size_t size;
0136     u32 num_contexts;
0137 };
0138 
0139 /**
0140  * struct vmw_cmdbuf_header - Command buffer metadata
0141  *
0142  * @man: The command buffer manager.
0143  * @cb_header: Device command buffer header, allocated from a DMA pool.
0144  * @cb_context: The device command buffer context.
0145  * @list: List head for attaching to the manager lists.
0146  * @node: The range manager node.
0147  * @handle: The DMA address of @cb_header. Handed to the device on command
0148  * buffer submission.
0149  * @cmd: Pointer to the command buffer space of this buffer.
0150  * @size: Size of the command buffer space of this buffer.
0151  * @reserved: Reserved space of this buffer.
0152  * @inline_space: Whether inline command buffer space is used.
0153  */
0154 struct vmw_cmdbuf_header {
0155     struct vmw_cmdbuf_man *man;
0156     SVGACBHeader *cb_header;
0157     SVGACBContext cb_context;
0158     struct list_head list;
0159     struct drm_mm_node node;
0160     dma_addr_t handle;
0161     u8 *cmd;
0162     size_t size;
0163     size_t reserved;
0164     bool inline_space;
0165 };
0166 
0167 /**
0168  * struct vmw_cmdbuf_dheader - Device command buffer header with inline
0169  * command buffer space.
0170  *
0171  * @cb_header: Device command buffer header.
0172  * @cmd: Inline command buffer space.
0173  */
0174 struct vmw_cmdbuf_dheader {
0175     SVGACBHeader cb_header;
0176     u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
0177 };
0178 
0179 /**
0180  * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
0181  *
0182  * @page_size: Size of requested command buffer space in pages.
0183  * @node: Pointer to the range manager node.
0184  * @done: True if this allocation has succeeded.
0185  */
0186 struct vmw_cmdbuf_alloc_info {
0187     size_t page_size;
0188     struct drm_mm_node *node;
0189     bool done;
0190 };
0191 
0192 /* Loop over each context in the command buffer manager. */
0193 #define for_each_cmdbuf_ctx(_man, _i, _ctx)             \
0194     for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
0195          ++(_i), ++(_ctx))
0196 
0197 static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
0198                 bool enable);
0199 static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
0200 
0201 /**
0202  * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
0203  *
0204  * @man: The command buffer manager.
0205  * @interruptible: Whether to wait interruptible when locking.
0206  */
0207 static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
0208 {
0209     if (interruptible) {
0210         if (mutex_lock_interruptible(&man->cur_mutex))
0211             return -ERESTARTSYS;
0212     } else {
0213         mutex_lock(&man->cur_mutex);
0214     }
0215 
0216     return 0;
0217 }
0218 
0219 /**
0220  * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
0221  *
0222  * @man: The command buffer manager.
0223  */
0224 static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
0225 {
0226     mutex_unlock(&man->cur_mutex);
0227 }
0228 
0229 /**
0230  * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
0231  * been used for the device context with inline command buffers.
0232  * Need not be called locked.
0233  *
0234  * @header: Pointer to the header to free.
0235  */
0236 static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
0237 {
0238     struct vmw_cmdbuf_dheader *dheader;
0239 
0240     if (WARN_ON_ONCE(!header->inline_space))
0241         return;
0242 
0243     dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
0244                    cb_header);
0245     dma_pool_free(header->man->dheaders, dheader, header->handle);
0246     kfree(header);
0247 }
0248 
0249 /**
0250  * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
0251  * associated structures.
0252  *
0253  * @header: Pointer to the header to free.
0254  *
0255  * For internal use. Must be called with man::lock held.
0256  */
0257 static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
0258 {
0259     struct vmw_cmdbuf_man *man = header->man;
0260 
0261     lockdep_assert_held_once(&man->lock);
0262 
0263     if (header->inline_space) {
0264         vmw_cmdbuf_header_inline_free(header);
0265         return;
0266     }
0267 
0268     drm_mm_remove_node(&header->node);
0269     wake_up_all(&man->alloc_queue);
0270     if (header->cb_header)
0271         dma_pool_free(man->headers, header->cb_header,
0272                   header->handle);
0273     kfree(header);
0274 }
0275 
0276 /**
0277  * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
0278  * associated structures.
0279  *
0280  * @header: Pointer to the header to free.
0281  */
0282 void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
0283 {
0284     struct vmw_cmdbuf_man *man = header->man;
0285 
0286     /* Avoid locking if inline_space */
0287     if (header->inline_space) {
0288         vmw_cmdbuf_header_inline_free(header);
0289         return;
0290     }
0291     spin_lock(&man->lock);
0292     __vmw_cmdbuf_header_free(header);
0293     spin_unlock(&man->lock);
0294 }
0295 
0296 
0297 /**
0298  * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
0299  *
0300  * @header: The header of the buffer to submit.
0301  */
0302 static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
0303 {
0304     struct vmw_cmdbuf_man *man = header->man;
0305     u32 val;
0306 
0307     val = upper_32_bits(header->handle);
0308     vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
0309 
0310     val = lower_32_bits(header->handle);
0311     val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
0312     vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
0313 
0314     return header->cb_header->status;
0315 }
0316 
0317 /**
0318  * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
0319  *
0320  * @ctx: The command buffer context to initialize
0321  */
0322 static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
0323 {
0324     INIT_LIST_HEAD(&ctx->hw_submitted);
0325     INIT_LIST_HEAD(&ctx->submitted);
0326     INIT_LIST_HEAD(&ctx->preempted);
0327     ctx->num_hw_submitted = 0;
0328 }
0329 
0330 /**
0331  * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
0332  * context.
0333  *
0334  * @man: The command buffer manager.
0335  * @ctx: The command buffer context.
0336  *
0337  * Submits command buffers to hardware until there are no more command
0338  * buffers to submit or the hardware can't handle more command buffers.
0339  */
0340 static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
0341                   struct vmw_cmdbuf_context *ctx)
0342 {
0343     while (ctx->num_hw_submitted < man->max_hw_submitted &&
0344            !list_empty(&ctx->submitted) &&
0345            !ctx->block_submission) {
0346         struct vmw_cmdbuf_header *entry;
0347         SVGACBStatus status;
0348 
0349         entry = list_first_entry(&ctx->submitted,
0350                      struct vmw_cmdbuf_header,
0351                      list);
0352 
0353         status = vmw_cmdbuf_header_submit(entry);
0354 
0355         /* This should never happen */
0356         if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
0357             entry->cb_header->status = SVGA_CB_STATUS_NONE;
0358             break;
0359         }
0360 
0361         list_move_tail(&entry->list, &ctx->hw_submitted);
0362         ctx->num_hw_submitted++;
0363     }
0364 
0365 }
0366 
0367 /**
0368  * vmw_cmdbuf_ctx_process - Process a command buffer context.
0369  *
0370  * @man: The command buffer manager.
0371  * @ctx: The command buffer context.
0372  * @notempty: Incremented if the context's submitted list is still non-empty after processing.
0373  *
0374  * Submit command buffers to hardware if possible, and process finished
0375  * buffers, typically by freeing them, taking appropriate action on preemption
0376  * or error. Wake up waiters if appropriate.
0377  */
0378 static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
0379                    struct vmw_cmdbuf_context *ctx,
0380                    int *notempty)
0381 {
0382     struct vmw_cmdbuf_header *entry, *next;
0383 
0384     vmw_cmdbuf_ctx_submit(man, ctx);
0385 
0386     list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
0387         SVGACBStatus status = entry->cb_header->status;
0388 
0389         if (status == SVGA_CB_STATUS_NONE)
0390             break;
0391 
0392         list_del(&entry->list);
0393         wake_up_all(&man->idle_queue);
0394         ctx->num_hw_submitted--;
0395         switch (status) {
0396         case SVGA_CB_STATUS_COMPLETED:
0397             __vmw_cmdbuf_header_free(entry);
0398             break;
0399         case SVGA_CB_STATUS_COMMAND_ERROR:
0400             WARN_ONCE(true, "Command buffer error.\n");
0401             entry->cb_header->status = SVGA_CB_STATUS_NONE;
0402             list_add_tail(&entry->list, &man->error);
0403             schedule_work(&man->work);
0404             break;
0405         case SVGA_CB_STATUS_PREEMPTED:
0406             entry->cb_header->status = SVGA_CB_STATUS_NONE;
0407             list_add_tail(&entry->list, &ctx->preempted);
0408             break;
0409         case SVGA_CB_STATUS_CB_HEADER_ERROR:
0410             WARN_ONCE(true, "Command buffer header error.\n");
0411             __vmw_cmdbuf_header_free(entry);
0412             break;
0413         default:
0414             WARN_ONCE(true, "Undefined command buffer status.\n");
0415             __vmw_cmdbuf_header_free(entry);
0416             break;
0417         }
0418     }
0419 
0420     vmw_cmdbuf_ctx_submit(man, ctx);
0421     if (!list_empty(&ctx->submitted))
0422         (*notempty)++;
0423 }
0424 
0425 /**
0426  * vmw_cmdbuf_man_process - Process all command buffer contexts and
0427  * switch on and off irqs as appropriate.
0428  *
0429  * @man: The command buffer manager.
0430  *
0431  * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
0432  * command buffers left that are not submitted to hardware, make sure
0433  * IRQ handling is turned on. Otherwise, make sure it's turned off.
0434  */
0435 static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
0436 {
0437     int notempty;
0438     struct vmw_cmdbuf_context *ctx;
0439     int i;
0440 
0441 retry:
0442     notempty = 0;
0443     for_each_cmdbuf_ctx(man, i, ctx)
0444         vmw_cmdbuf_ctx_process(man, ctx, &notempty);
0445 
0446     if (man->irq_on && !notempty) {
0447         vmw_generic_waiter_remove(man->dev_priv,
0448                       SVGA_IRQFLAG_COMMAND_BUFFER,
0449                       &man->dev_priv->cmdbuf_waiters);
0450         man->irq_on = false;
0451     } else if (!man->irq_on && notempty) {
0452         vmw_generic_waiter_add(man->dev_priv,
0453                        SVGA_IRQFLAG_COMMAND_BUFFER,
0454                        &man->dev_priv->cmdbuf_waiters);
0455         man->irq_on = true;
0456 
0457         /* Rerun in case we just missed an irq. */
0458         goto retry;
0459     }
0460 }
0461 
0462 /**
0463  * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
0464  * command buffer context
0465  *
0466  * @man: The command buffer manager.
0467  * @header: The header of the buffer to submit.
0468  * @cb_context: The command buffer context to use.
0469  *
0470  * This function adds @header to the "submitted" queue of the command
0471  * buffer context identified by @cb_context. It then calls the command buffer
0472  * manager processing to potentially submit the buffer to hardware.
0473  * @man->lock needs to be held when calling this function.
0474  */
0475 static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
0476                    struct vmw_cmdbuf_header *header,
0477                    SVGACBContext cb_context)
0478 {
0479     if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
0480         header->cb_header->dxContext = 0;
0481     header->cb_context = cb_context;
0482     list_add_tail(&header->list, &man->ctx[cb_context].submitted);
0483 
0484     vmw_cmdbuf_man_process(man);
0485 }
0486 
0487 /**
0488  * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
0489  * handler implemented as a threaded irq task.
0490  *
0491  * @man: Pointer to the command buffer manager.
0492  *
0493  * The bottom half of the interrupt handler simply calls into the
0494  * command buffer processor to free finished buffers and submit any
0495  * queued buffers to hardware.
0496  */
0497 void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
0498 {
0499     spin_lock(&man->lock);
0500     vmw_cmdbuf_man_process(man);
0501     spin_unlock(&man->lock);
0502 }
0503 
0504 /**
0505  * vmw_cmdbuf_work_func - The deferred work function that handles
0506  * command buffer errors.
0507  *
0508  * @work: The work func closure argument.
0509  *
0510  * Restarting the command buffer context after an error requires process
0511  * context, so it is deferred to this work function.
0512  */
0513 static void vmw_cmdbuf_work_func(struct work_struct *work)
0514 {
0515     struct vmw_cmdbuf_man *man =
0516         container_of(work, struct vmw_cmdbuf_man, work);
0517     struct vmw_cmdbuf_header *entry, *next;
0518     uint32_t dummy = 0;
0519     bool send_fence = false;
0520     struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
0521     int i;
0522     struct vmw_cmdbuf_context *ctx;
0523     bool global_block = false;
0524 
0525     for_each_cmdbuf_ctx(man, i, ctx)
0526         INIT_LIST_HEAD(&restart_head[i]);
0527 
0528     mutex_lock(&man->error_mutex);
0529     spin_lock(&man->lock);
0530     list_for_each_entry_safe(entry, next, &man->error, list) {
0531         SVGACBHeader *cb_hdr = entry->cb_header;
0532         SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
0533             (entry->cmd + cb_hdr->errorOffset);
0534         u32 error_cmd_size, new_start_offset;
0535         const char *cmd_name;
0536 
0537         list_del_init(&entry->list);
0538         global_block = true;
0539 
0540         if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
0541             VMW_DEBUG_USER("Unknown command causing device error.\n");
0542             VMW_DEBUG_USER("Command buffer offset is %lu\n",
0543                        (unsigned long) cb_hdr->errorOffset);
0544             __vmw_cmdbuf_header_free(entry);
0545             send_fence = true;
0546             continue;
0547         }
0548 
0549         VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
0550                    cmd_name);
0551         VMW_DEBUG_USER("Command buffer offset is %lu\n",
0552                    (unsigned long) cb_hdr->errorOffset);
0553         VMW_DEBUG_USER("Command size is %lu\n",
0554                    (unsigned long) error_cmd_size);
0555 
0556         new_start_offset = cb_hdr->errorOffset + error_cmd_size;
0557 
0558         if (new_start_offset >= cb_hdr->length) {
0559             __vmw_cmdbuf_header_free(entry);
0560             send_fence = true;
0561             continue;
0562         }
0563 
0564         if (man->using_mob)
0565             cb_hdr->ptr.mob.mobOffset += new_start_offset;
0566         else
0567             cb_hdr->ptr.pa += (u64) new_start_offset;
0568 
0569         entry->cmd += new_start_offset;
0570         cb_hdr->length -= new_start_offset;
0571         cb_hdr->errorOffset = 0;
0572         cb_hdr->offset = 0;
0573 
0574         list_add_tail(&entry->list, &restart_head[entry->cb_context]);
0575     }
0576 
0577     for_each_cmdbuf_ctx(man, i, ctx)
0578         man->ctx[i].block_submission = true;
0579 
0580     spin_unlock(&man->lock);
0581 
0582     /* Preempt all contexts */
0583     if (global_block && vmw_cmdbuf_preempt(man, 0))
0584         DRM_ERROR("Failed preempting command buffer contexts\n");
0585 
0586     spin_lock(&man->lock);
0587     for_each_cmdbuf_ctx(man, i, ctx) {
0588         /* Move preempted command buffers to the preempted queue. */
0589         vmw_cmdbuf_ctx_process(man, ctx, &dummy);
0590 
0591         /*
0592          * Add the preempted queue after the command buffer
0593          * that caused an error.
0594          */
0595         list_splice_init(&ctx->preempted, restart_head[i].prev);
0596 
0597         /*
0598          * Finally add all command buffers first in the submitted
0599          * queue, to rerun them.
0600          */
0601 
0602         ctx->block_submission = false;
0603         list_splice_init(&restart_head[i], &ctx->submitted);
0604     }
0605 
0606     vmw_cmdbuf_man_process(man);
0607     spin_unlock(&man->lock);
0608 
0609     if (global_block && vmw_cmdbuf_startstop(man, 0, true))
0610         DRM_ERROR("Failed restarting command buffer contexts\n");
0611 
0612     /* Send a new fence in case one was removed */
0613     if (send_fence) {
0614         vmw_cmd_send_fence(man->dev_priv, &dummy);
0615         wake_up_all(&man->idle_queue);
0616     }
0617 
0618     mutex_unlock(&man->error_mutex);
0619 }
0620 
0621 /**
0622  * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
0623  *
0624  * @man: The command buffer manager.
0625  * @check_preempted: Check also the preempted queue for pending command buffers.
0626  *
0627  */
0628 static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
0629                 bool check_preempted)
0630 {
0631     struct vmw_cmdbuf_context *ctx;
0632     bool idle = false;
0633     int i;
0634 
0635     spin_lock(&man->lock);
0636     vmw_cmdbuf_man_process(man);
0637     for_each_cmdbuf_ctx(man, i, ctx) {
0638         if (!list_empty(&ctx->submitted) ||
0639             !list_empty(&ctx->hw_submitted) ||
0640             (check_preempted && !list_empty(&ctx->preempted)))
0641             goto out_unlock;
0642     }
0643 
0644     idle = list_empty(&man->error);
0645 
0646 out_unlock:
0647     spin_unlock(&man->lock);
0648 
0649     return idle;
0650 }
0651 
0652 /**
0653  * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
0654  * command submissions
0655  *
0656  * @man: The command buffer manager.
0657  *
0658  * Flushes the current command buffer without allocating a new one. A new one
0659  * is automatically allocated when needed. Call with @man->cur_mutex held.
0660  */
0661 static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
0662 {
0663     struct vmw_cmdbuf_header *cur = man->cur;
0664 
0665     lockdep_assert_held_once(&man->cur_mutex);
0666 
0667     if (!cur)
0668         return;
0669 
0670     spin_lock(&man->lock);
0671     if (man->cur_pos == 0) {
0672         __vmw_cmdbuf_header_free(cur);
0673         goto out_unlock;
0674     }
0675 
0676     man->cur->cb_header->length = man->cur_pos;
0677     vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
0678 out_unlock:
0679     spin_unlock(&man->lock);
0680     man->cur = NULL;
0681     man->cur_pos = 0;
0682 }
0683 
0684 /**
0685  * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
0686  * command submissions
0687  *
0688  * @man: The command buffer manager.
0689  * @interruptible: Whether to sleep interruptible when sleeping.
0690  *
0691  * Flushes the current command buffer without allocating a new one. A new one
0692  * is automatically allocated when needed.
0693  */
0694 int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
0695              bool interruptible)
0696 {
0697     int ret = vmw_cmdbuf_cur_lock(man, interruptible);
0698 
0699     if (ret)
0700         return ret;
0701 
0702     __vmw_cmdbuf_cur_flush(man);
0703     vmw_cmdbuf_cur_unlock(man);
0704 
0705     return 0;
0706 }
0707 
0708 /**
0709  * vmw_cmdbuf_idle - Wait for command buffer manager idle.
0710  *
0711  * @man: The command buffer manager.
0712  * @interruptible: Sleep interruptible while waiting.
0713  * @timeout: Time out after this many ticks.
0714  *
0715  * Wait until the command buffer manager has processed all command buffers,
0716  * or until a timeout occurs. If a timeout occurs, the function will return
0717  * -EBUSY.
0718  */
0719 int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
0720             unsigned long timeout)
0721 {
0722     int ret;
0723 
0724     ret = vmw_cmdbuf_cur_flush(man, interruptible);
0725     vmw_generic_waiter_add(man->dev_priv,
0726                    SVGA_IRQFLAG_COMMAND_BUFFER,
0727                    &man->dev_priv->cmdbuf_waiters);
0728 
0729     if (interruptible) {
0730         ret = wait_event_interruptible_timeout
0731             (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
0732              timeout);
0733     } else {
0734         ret = wait_event_timeout
0735             (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
0736              timeout);
0737     }
0738     vmw_generic_waiter_remove(man->dev_priv,
0739                   SVGA_IRQFLAG_COMMAND_BUFFER,
0740                   &man->dev_priv->cmdbuf_waiters);
0741     if (ret == 0) {
0742         if (!vmw_cmdbuf_man_idle(man, true))
0743             ret = -EBUSY;
0744         else
0745             ret = 0;
0746     }
0747     if (ret > 0)
0748         ret = 0;
0749 
0750     return ret;
0751 }
0752 
0753 /**
0754  * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
0755  *
0756  * @man: The command buffer manager.
0757  * @info: Allocation info. Will hold the size on entry and allocated mm node
0758  * on successful return.
0759  *
0760  * Try to allocate buffer space from the main pool. Returns true if the
0761  * allocation succeeded, and sets @info->done accordingly.
0762  */
0763 static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
0764                  struct vmw_cmdbuf_alloc_info *info)
0765 {
0766     int ret;
0767 
0768     if (info->done)
0769         return true;
0770 
0771     memset(info->node, 0, sizeof(*info->node));
0772     spin_lock(&man->lock);
0773     ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
0774     if (ret) {
0775         vmw_cmdbuf_man_process(man);
0776         ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
0777     }
0778 
0779     spin_unlock(&man->lock);
0780     info->done = !ret;
0781 
0782     return info->done;
0783 }
0784 
0785 /**
0786  * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
0787  *
0788  * @man: The command buffer manager.
0789  * @node: Pointer to pre-allocated range-manager node.
0790  * @size: The size of the allocation.
0791  * @interruptible: Whether to sleep interruptible while waiting for space.
0792  *
0793  * This function allocates buffer space from the main pool, and if there is
0794  * no space currently available, it turns on IRQ handling and sleeps waiting
0795  * for space to become available.
0796  */
0797 static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
0798                   struct drm_mm_node *node,
0799                   size_t size,
0800                   bool interruptible)
0801 {
0802     struct vmw_cmdbuf_alloc_info info;
0803 
0804     info.page_size = PFN_UP(size);
0805     info.node = node;
0806     info.done = false;
0807 
0808     /*
0809      * To prevent starvation of large requests, only one allocating call
0810      * at a time waiting for space.
0811      */
0812     if (interruptible) {
0813         if (mutex_lock_interruptible(&man->space_mutex))
0814             return -ERESTARTSYS;
0815     } else {
0816         mutex_lock(&man->space_mutex);
0817     }
0818 
0819     /* Try to allocate space without waiting. */
0820     if (vmw_cmdbuf_try_alloc(man, &info))
0821         goto out_unlock;
0822 
0823     vmw_generic_waiter_add(man->dev_priv,
0824                    SVGA_IRQFLAG_COMMAND_BUFFER,
0825                    &man->dev_priv->cmdbuf_waiters);
0826 
0827     if (interruptible) {
0828         int ret;
0829 
0830         ret = wait_event_interruptible
0831             (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
0832         if (ret) {
0833             vmw_generic_waiter_remove
0834                 (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
0835                  &man->dev_priv->cmdbuf_waiters);
0836             mutex_unlock(&man->space_mutex);
0837             return ret;
0838         }
0839     } else {
0840         wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
0841     }
0842     vmw_generic_waiter_remove(man->dev_priv,
0843                   SVGA_IRQFLAG_COMMAND_BUFFER,
0844                   &man->dev_priv->cmdbuf_waiters);
0845 
0846 out_unlock:
0847     mutex_unlock(&man->space_mutex);
0848 
0849     return 0;
0850 }
0851 
0852 /**
0853  * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
0854  * space from the main pool.
0855  *
0856  * @man: The command buffer manager.
0857  * @header: Pointer to the header to set up.
0858  * @size: The requested size of the buffer space.
0859  * @interruptible: Whether to sleep interruptible while waiting for space.
0860  */
0861 static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
0862                  struct vmw_cmdbuf_header *header,
0863                  size_t size,
0864                  bool interruptible)
0865 {
0866     SVGACBHeader *cb_hdr;
0867     size_t offset;
0868     int ret;
0869 
0870     if (!man->has_pool)
0871         return -ENOMEM;
0872 
0873     ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
0874 
0875     if (ret)
0876         return ret;
0877 
0878     header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
0879                         &header->handle);
0880     if (!header->cb_header) {
0881         ret = -ENOMEM;
0882         goto out_no_cb_header;
0883     }
0884 
0885     header->size = header->node.size << PAGE_SHIFT;
0886     cb_hdr = header->cb_header;
0887     offset = header->node.start << PAGE_SHIFT;
0888     header->cmd = man->map + offset;
0889     if (man->using_mob) {
0890         cb_hdr->flags = SVGA_CB_FLAG_MOB;
0891         cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
0892         cb_hdr->ptr.mob.mobOffset = offset;
0893     } else {
0894         cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
0895     }
0896 
0897     return 0;
0898 
0899 out_no_cb_header:
0900     spin_lock(&man->lock);
0901     drm_mm_remove_node(&header->node);
0902     spin_unlock(&man->lock);
0903 
0904     return ret;
0905 }
0906 
0907 /**
0908  * vmw_cmdbuf_space_inline - Set up a command buffer header with
0909  * inline command buffer space.
0910  *
0911  * @man: The command buffer manager.
0912  * @header: Pointer to the header to set up.
0913  * @size: The requested size of the buffer space.
0914  */
0915 static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
0916                    struct vmw_cmdbuf_header *header,
0917                    int size)
0918 {
0919     struct vmw_cmdbuf_dheader *dheader;
0920     SVGACBHeader *cb_hdr;
0921 
0922     if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
0923         return -ENOMEM;
0924 
0925     dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
0926                   &header->handle);
0927     if (!dheader)
0928         return -ENOMEM;
0929 
0930     header->inline_space = true;
0931     header->size = VMW_CMDBUF_INLINE_SIZE;
0932     cb_hdr = &dheader->cb_header;
0933     header->cb_header = cb_hdr;
0934     header->cmd = dheader->cmd;
0935     cb_hdr->status = SVGA_CB_STATUS_NONE;
0936     cb_hdr->flags = SVGA_CB_FLAG_NONE;
0937     cb_hdr->ptr.pa = (u64)header->handle +
0938         (u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
0939 
0940     return 0;
0941 }
0942 
0943 /**
0944  * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
0945  * command buffer space.
0946  *
0947  * @man: The command buffer manager.
0948  * @size: The requested size of the buffer space.
0949  * @interruptible: Whether to sleep interruptible while waiting for space.
0950  * @p_header: Points to a header pointer to populate on successful return.
0951  *
0952  * Returns a pointer to command buffer space if successful. Otherwise
0953  * returns an error pointer. The header pointer returned in @p_header should
0954  * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
0955  */
0956 void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
0957                size_t size, bool interruptible,
0958                struct vmw_cmdbuf_header **p_header)
0959 {
0960     struct vmw_cmdbuf_header *header;
0961     int ret = 0;
0962 
0963     *p_header = NULL;
0964 
0965     header = kzalloc(sizeof(*header), GFP_KERNEL);
0966     if (!header)
0967         return ERR_PTR(-ENOMEM);
0968 
0969     if (size <= VMW_CMDBUF_INLINE_SIZE)
0970         ret = vmw_cmdbuf_space_inline(man, header, size);
0971     else
0972         ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
0973 
0974     if (ret) {
0975         kfree(header);
0976         return ERR_PTR(ret);
0977     }
0978 
0979     header->man = man;
0980     INIT_LIST_HEAD(&header->list);
0981     header->cb_header->status = SVGA_CB_STATUS_NONE;
0982     *p_header = header;
0983 
0984     return header->cmd;
0985 }
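
     /*
      * A minimal usage sketch for the dedicated-header path (illustrative only;
      * @cmd_size and the command payload are hypothetical, and error handling is
      * trimmed). The caller allocates a header with its own buffer space,
      * reserves that space, fills it with commands and commits it:
      *
      *     struct vmw_cmdbuf_header *header;
      *     void *buf, *cmd;
      *
      *     buf = vmw_cmdbuf_alloc(man, cmd_size, true, &header);
      *     if (IS_ERR(buf))
      *             return PTR_ERR(buf);
      *     cmd = vmw_cmdbuf_reserve(man, cmd_size, SVGA3D_INVALID_ID, true, header);
      *     ... fill @cmd with @cmd_size bytes of device commands ...
      *     vmw_cmdbuf_commit(man, cmd_size, header, false);
      */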
0986 
0987 /**
0988  * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
0989  * command buffer.
0990  *
0991  * @man: The command buffer manager.
0992  * @size: The requested size of the commands.
0993  * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
0994  * @interruptible: Whether to sleep interruptible while waiting for space.
0995  *
0996  * Returns a pointer to command buffer space if successful. Otherwise
0997  * returns an error pointer.
0998  */
0999 static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
1000                     size_t size,
1001                     int ctx_id,
1002                     bool interruptible)
1003 {
1004     struct vmw_cmdbuf_header *cur;
1005     void *ret;
1006 
1007     if (vmw_cmdbuf_cur_lock(man, interruptible))
1008         return ERR_PTR(-ERESTARTSYS);
1009 
1010     cur = man->cur;
1011     if (cur && (size + man->cur_pos > cur->size ||
1012             ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
1013              ctx_id != cur->cb_header->dxContext)))
1014         __vmw_cmdbuf_cur_flush(man);
1015 
1016     if (!man->cur) {
1017         ret = vmw_cmdbuf_alloc(man,
1018                        max_t(size_t, size, man->default_size),
1019                        interruptible, &man->cur);
1020         if (IS_ERR(ret)) {
1021             vmw_cmdbuf_cur_unlock(man);
1022             return ret;
1023         }
1024 
1025         cur = man->cur;
1026     }
1027 
1028     if (ctx_id != SVGA3D_INVALID_ID) {
1029         cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
1030         cur->cb_header->dxContext = ctx_id;
1031     }
1032 
1033     cur->reserved = size;
1034 
1035     return (void *) (man->cur->cmd + man->cur_pos);
1036 }
1037 
1038 /**
1039  * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
1040  *
1041  * @man: The command buffer manager.
1042  * @size: The size of the commands actually written.
1043  * @flush: Whether to flush the command buffer immediately.
1044  */
1045 static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
1046                   size_t size, bool flush)
1047 {
1048     struct vmw_cmdbuf_header *cur = man->cur;
1049 
1050     lockdep_assert_held_once(&man->cur_mutex);
1051 
1052     WARN_ON(size > cur->reserved);
1053     man->cur_pos += size;
1054     if (!size)
1055         cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1056     if (flush)
1057         __vmw_cmdbuf_cur_flush(man);
1058     vmw_cmdbuf_cur_unlock(man);
1059 }
1060 
1061 /**
1062  * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
1063  *
1064  * @man: The command buffer manager.
1065  * @size: The requested size of the commands.
1066  * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
1067  * @interruptible: Whether to sleep interruptible while waiting for space.
1068  * @header: Header of the command buffer. NULL if the current command buffer
1069  * should be used.
1070  *
1071  * Returns a pointer to command buffer space if successful. Otherwise
1072  * returns an error pointer.
1073  */
1074 void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
1075              int ctx_id, bool interruptible,
1076              struct vmw_cmdbuf_header *header)
1077 {
1078     if (!header)
1079         return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
1080 
1081     if (size > header->size)
1082         return ERR_PTR(-EINVAL);
1083 
1084     if (ctx_id != SVGA3D_INVALID_ID) {
1085         header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
1086         header->cb_header->dxContext = ctx_id;
1087     }
1088 
1089     header->reserved = size;
1090     return header->cmd;
1091 }
1092 
1093 /**
1094  * vmw_cmdbuf_commit - Commit commands in a command buffer.
1095  *
1096  * @man: The command buffer manager.
1097  * @size: The size of the commands actually written.
1098  * @header: Header of the command buffer. NULL if the current command buffer
1099  * should be used.
1100  * @flush: Whether to flush the command buffer immediately.
1101  */
1102 void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1103                struct vmw_cmdbuf_header *header, bool flush)
1104 {
1105     if (!header) {
1106         vmw_cmdbuf_commit_cur(man, size, flush);
1107         return;
1108     }
1109 
1110     (void) vmw_cmdbuf_cur_lock(man, false);
1111     __vmw_cmdbuf_cur_flush(man);
1112     WARN_ON(size > header->reserved);
1113     man->cur = header;
1114     man->cur_pos = size;
1115     if (!size)
1116         header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1117     if (flush)
1118         __vmw_cmdbuf_cur_flush(man);
1119     vmw_cmdbuf_cur_unlock(man);
1120 }
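
     /*
      * A minimal usage sketch for the shared "current" buffer path (illustrative
      * only; @cmd_size and the command payload are hypothetical). Passing a NULL
      * header makes vmw_cmdbuf_reserve() and vmw_cmdbuf_commit() operate on
      * @man->cur, so small kernel submissions are batched until the current
      * buffer fills up or is explicitly flushed:
      *
      *     cmd = vmw_cmdbuf_reserve(man, cmd_size, SVGA3D_INVALID_ID, true, NULL);
      *     if (IS_ERR(cmd))
      *             return PTR_ERR(cmd);
      *     ... fill @cmd with @cmd_size bytes of device commands ...
      *     vmw_cmdbuf_commit(man, cmd_size, NULL, false);
      *
      * A subsequent vmw_cmdbuf_cur_flush(man, true) forces submission of
      * anything still batched in the current buffer.
      */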
1121 
1122 
1123 /**
1124  * vmw_cmdbuf_send_device_command - Send a command through the device context.
1125  *
1126  * @man: The command buffer manager.
1127  * @command: Pointer to the command to send.
1128  * @size: Size of the command.
1129  *
1130  * Synchronously sends a device context command.
1131  */
1132 static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
1133                       const void *command,
1134                       size_t size)
1135 {
1136     struct vmw_cmdbuf_header *header;
1137     int status;
1138     void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
1139 
1140     if (IS_ERR(cmd))
1141         return PTR_ERR(cmd);
1142 
1143     memcpy(cmd, command, size);
1144     header->cb_header->length = size;
1145     header->cb_context = SVGA_CB_CONTEXT_DEVICE;
1146     spin_lock(&man->lock);
1147     status = vmw_cmdbuf_header_submit(header);
1148     spin_unlock(&man->lock);
1149     vmw_cmdbuf_header_free(header);
1150 
1151     if (status != SVGA_CB_STATUS_COMPLETED) {
1152         DRM_ERROR("Device context command failed with status %d\n",
1153               status);
1154         return -EINVAL;
1155     }
1156 
1157     return 0;
1158 }
1159 
1160 /**
1161  * vmw_cmdbuf_preempt - Send a preempt command through the device
1162  * context.
1163  *
1164  * @man: The command buffer manager.
1165  * @context: Device context to pass command through.
1166  *
1167  * Synchronously sends a preempt command.
1168  */
1169 static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
1170 {
1171     struct {
1172         uint32 id;
1173         SVGADCCmdPreempt body;
1174     } __packed cmd;
1175 
1176     cmd.id = SVGA_DC_CMD_PREEMPT;
1177     cmd.body.context = SVGA_CB_CONTEXT_0 + context;
1178     cmd.body.ignoreIDZero = 0;
1179 
1180     return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1181 }
1182 
1183 
1184 /**
1185  * vmw_cmdbuf_startstop - Send a start / stop command through the device
1186  * context.
1187  *
1188  * @man: The command buffer manager.
1189  * @context: Device context to start/stop.
1190  * @enable: Whether to enable or disable the context.
1191  *
1192  * Synchronously sends a device start / stop context command.
1193  */
1194 static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
1195                 bool enable)
1196 {
1197     struct {
1198         uint32 id;
1199         SVGADCCmdStartStop body;
1200     } __packed cmd;
1201 
1202     cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
1203     cmd.body.enable = (enable) ? 1 : 0;
1204     cmd.body.context = SVGA_CB_CONTEXT_0 + context;
1205 
1206     return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1207 }
1208 
1209 /**
1210  * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
1211  *
1212  * @man: The command buffer manager.
1213  * @size: The size of the main space pool.
1214  *
1215  * Set the size and allocate the main command buffer space pool.
1216  * If successful, this enables large command submissions.
1217  * Note that this function requires that rudimentary command
1218  * submission is already available and that the MOB memory manager is alive.
1219  * Returns 0 on success. Negative error code on failure.
1220  */
1221 int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
1222 {
1223     struct vmw_private *dev_priv = man->dev_priv;
1224     bool dummy;
1225     int ret;
1226 
1227     if (man->has_pool)
1228         return -EINVAL;
1229 
1230     /* First, try to allocate a huge chunk of DMA memory */
1231     size = PAGE_ALIGN(size);
1232     man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
1233                       &man->handle, GFP_KERNEL);
1234     if (man->map) {
1235         man->using_mob = false;
1236     } else {
1237         /*
1238          * DMA memory failed. If we can have command buffers in a
1239          * MOB, try to use that instead. Note that this will
1240          * actually call into the already enabled manager, when
1241          * binding the MOB.
1242          */
1243         if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
1244             !dev_priv->has_mob)
1245             return -ENOMEM;
1246 
1247         ret = vmw_bo_create_kernel(dev_priv, size,
1248                        &vmw_mob_placement,
1249                        &man->cmd_space);
1250         if (ret)
1251             return ret;
1252 
1253         man->using_mob = true;
1254         ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
1255                   &man->map_obj);
1256         if (ret)
1257             goto out_no_map;
1258 
1259         man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
1260     }
1261 
1262     man->size = size;
1263     drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
1264 
1265     man->has_pool = true;
1266 
1267     /*
1268      * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
1269      * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
1270      * needs to wait for space and we block on further command
1271      * submissions to be able to free up space.
1272      */
1273     man->default_size = VMW_CMDBUF_INLINE_SIZE;
1274     drm_info(&dev_priv->drm,
1275          "Using command buffers with %s pool.\n",
1276          (man->using_mob) ? "MOB" : "DMA");
1277 
1278     return 0;
1279 
1280 out_no_map:
1281     if (man->using_mob) {
1282         ttm_bo_put(man->cmd_space);
1283         man->cmd_space = NULL;
1284     }
1285 
1286     return ret;
1287 }
1288 
1289 /**
1290  * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
1291  * inline command buffer submissions only.
1292  *
1293  * @dev_priv: Pointer to device private structure.
1294  *
1295  * Returns a pointer to a command buffer manager on success or an error pointer
1296  * on failure. The command buffer manager will be enabled for submissions of
1297  * size VMW_CMDBUF_INLINE_SIZE only.
1298  */
1299 struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
1300 {
1301     struct vmw_cmdbuf_man *man;
1302     struct vmw_cmdbuf_context *ctx;
1303     unsigned int i;
1304     int ret;
1305 
1306     if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
1307         return ERR_PTR(-ENOSYS);
1308 
1309     man = kzalloc(sizeof(*man), GFP_KERNEL);
1310     if (!man)
1311         return ERR_PTR(-ENOMEM);
1312 
1313     man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
1314         2 : 1;
1315     man->headers = dma_pool_create("vmwgfx cmdbuf",
1316                        dev_priv->drm.dev,
1317                        sizeof(SVGACBHeader),
1318                        64, PAGE_SIZE);
1319     if (!man->headers) {
1320         ret = -ENOMEM;
1321         goto out_no_pool;
1322     }
1323 
1324     man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
1325                     dev_priv->drm.dev,
1326                     sizeof(struct vmw_cmdbuf_dheader),
1327                     64, PAGE_SIZE);
1328     if (!man->dheaders) {
1329         ret = -ENOMEM;
1330         goto out_no_dpool;
1331     }
1332 
1333     for_each_cmdbuf_ctx(man, i, ctx)
1334         vmw_cmdbuf_ctx_init(ctx);
1335 
1336     INIT_LIST_HEAD(&man->error);
1337     spin_lock_init(&man->lock);
1338     mutex_init(&man->cur_mutex);
1339     mutex_init(&man->space_mutex);
1340     mutex_init(&man->error_mutex);
1341     man->default_size = VMW_CMDBUF_INLINE_SIZE;
1342     init_waitqueue_head(&man->alloc_queue);
1343     init_waitqueue_head(&man->idle_queue);
1344     man->dev_priv = dev_priv;
1345     man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
1346     INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
1347     vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
1348                    &dev_priv->error_waiters);
1349     ret = vmw_cmdbuf_startstop(man, 0, true);
1350     if (ret) {
1351         DRM_ERROR("Failed starting command buffer contexts\n");
1352         vmw_cmdbuf_man_destroy(man);
1353         return ERR_PTR(ret);
1354     }
1355 
1356     return man;
1357 
1358 out_no_dpool:
1359     dma_pool_destroy(man->headers);
1360 out_no_pool:
1361     kfree(man);
1362 
1363     return ERR_PTR(ret);
1364 }
1365 
1366 /**
1367  * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
1368  *
1369  * @man: Pointer to a command buffer manager.
1370  *
1371  * This function removes the main buffer space pool, and should be called
1372  * before MOB memory management is removed. When this function has been called,
1373  * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
1374  * less are allowed, and the default size of the command buffer for small kernel
1375  * submissions is also set to this size.
1376  */
1377 void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
1378 {
1379     if (!man->has_pool)
1380         return;
1381 
1382     man->has_pool = false;
1383     man->default_size = VMW_CMDBUF_INLINE_SIZE;
1384     (void) vmw_cmdbuf_idle(man, false, 10*HZ);
1385     if (man->using_mob) {
1386         (void) ttm_bo_kunmap(&man->map_obj);
1387         ttm_bo_put(man->cmd_space);
1388         man->cmd_space = NULL;
1389     } else {
1390         dma_free_coherent(man->dev_priv->drm.dev,
1391                   man->size, man->map, man->handle);
1392     }
1393 }
1394 
1395 /**
1396  * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
1397  *
1398  * @man: Pointer to a command buffer manager.
1399  *
1400  * This function idles and then destroys a command buffer manager.
1401  */
1402 void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
1403 {
1404     WARN_ON_ONCE(man->has_pool);
1405     (void) vmw_cmdbuf_idle(man, false, 10*HZ);
1406 
1407     if (vmw_cmdbuf_startstop(man, 0, false))
1408         DRM_ERROR("Failed stopping command buffer contexts.\n");
1409 
1410     vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
1411                   &man->dev_priv->error_waiters);
1412     (void) cancel_work_sync(&man->work);
1413     dma_pool_destroy(man->dheaders);
1414     dma_pool_destroy(man->headers);
1415     mutex_destroy(&man->cur_mutex);
1416     mutex_destroy(&man->space_mutex);
1417     mutex_destroy(&man->error_mutex);
1418     kfree(man);
1419 }